[Binary artifact: tar archive of Zuul CI output. Recoverable file listing:
  var/home/core/zuul-output/
  var/home/core/zuul-output/logs/
  var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log; binary payload omitted)]
ˉVL =D Сea/\Mǣӓt.ȸI?","o_tԬl'.\ڶJhuv&q,r~>{&-m_?qE߰L.iō;[=P/]1zIoiaFxO[31Z#3G2[a4O_K@@J뛁6[IE]Og-Z}!<1gRqp+zXf=}$*KC!|H%r}M}C,EWy<ᨏ D;Q |;WV4Εof\142 .EKdJKVڨވjZ=NN$e}ݷ[in kбp0>i c !T?KfZ#,0TPo_jJ*+Ap͓'ݫ+wx`(Մ(6';Džt:͓{!Fw6|e^6]]xdVy:PvH=kkb?]I,`4UB:vz\Zx]-X-JX]qd?B:3E$%ڃ*WZ s,c9oc Bzi~rx{*TpG%T ZJ4唅R;D:J J8|,_Uj znYwӬxLpຸc)7%@/^oJ{ 藣EnkZڔkXLAsCMtPH\vx/K}A2-),`Q<24&y]moF+MC_w{Iȇd؝ܢnu%${f_)ɴ-YLYl3aKd鮮*6YԅIh;Li)d>WesǬdFuhDU9MHf('[\U˯ZUs?Q&UR/H umVh/`UELWOWUTCM!:l,"%}!3,D kllZ&AYR@ p/4L1]KYm(>M@D$19 I3}t&ޥѠTwZ$[qrQeRn /W˩ɕ2 ,L$A4@7ڇ9xs>cToIz/TI 7\pc+IfKE_)koϕ^A}dPeueA,Ѐ7qOY~78/h0K/d3ެ, t[W^)Ny{݁x[:hfaǷ`qӅкǘ dLoAEex t> KFn A%S-^qC3­(9 '9 pHV''e%'''9Ʌ)ò8Ӟ y<:xRWiiMU|lJϲQ/dd2o;9I8 _JD>Op}F';vk[U VBzS^`%[!*(T$\EZ6[&fO*zxfk/Pq&}ҸûZhRiqjA|n[-]3jB!o[/2G1Ad zǝ ~ Ii zPچkYߢ2RTPU'΋ǜZޜ13rcZ{cekHo6\sXVfPoZv]k8XKH%Ѥ1p@zzQ ާ02'^Q}: s\ަ xCGCXJ\TwٖlYa4 PP1dD툲!'pk;~s+*4 "44jOCӸG + ]!\;Ү4DtuteRFDWCWFQ>>G)D-c(++^|H`3VUt <-f(uǂAހxOW $f,BBWV4tut$<&uA_ECWW ]!ZfNWR螮ΐ8EC=<X ruBRtut%"`GDWWhADU QJҕTT x hA@:]!Jz:CRD+;#IGL 1BA ,"hhr M#Z'ij6b+D}U( hJ0#:B<[ Vv^]!JHWV cRWEC~VZ~ Q>|=t%j^ F}[iOLWbhzG!ЕPSC~{m•&Bue.~OWCẄݧ+l .Ut(MΑ1 "AQWd3+uEU,3KiT4gcgD͊Eg X{={#wG#ʍ޲]Ȝ'{sFT_/z MnAF]BoAUS^AU0),5To/*Oχg1 hҋWXҋV鮧}z ӋJNckR^Di!HWh]QW{1GA|54D);ookxT{l45ܴ43iC" _bKp[tpM4A0=XOWA9ҕ%Rvlm]ZMm QVUt%k^ 1֨US'~YH#C3]+ ЕPSDCWX *uBFtutu2"BGCWDCWuBtut!(0e2BBBWVӮ3+<"B6`DՃ ўPJҕdZ٘RX@  ]Z;jGtutK$Q1OlmoiD4U<9;{6BKIiQ>gw4:<+6ds+-1b@& heޘՋЕ)]`~Jh NNWR+UjHxV `E>jWx73DccwDUR=]jz*6"SW{iVԤ3+&V2"ƣt( K;`T4tpOZX Qv^REwSH¢)j&;0}+m42YVtyFl*{XyK\͖}-cIh2k62J# eфsKhz(b"leua\FKUuC[5x'Mf$Ҥ"ӹPeZͅ2\_ݛ߽+9G'7c_?8 1D&i0."!H S-ߵo0+09](ܛ 0 i#ZZWy|wShMƿ됢]_]׏:=ڡ.i摣QAq*v?ovmhX A|78..D޳84IBl5ɂ#psHjQ|&]Wࣛ?ܸ΁"$>^̲V IcqT@S1fDXb< Z0-K}&(oeJ5vIphr|~/=~*S!!mN,E NcLgFCs)TyG8["4lBdEbQ#-;?J2kЄL!3ܙP*ƭv$AqV'(&@hhoU?2Rd:E95.^[.)N x2bH8۲~l ~XyO_IbVvq-7ֿ}jِ*3L )\<:^'Y} HEMܙv_8u9QK+wEdD\G',2gmEkC_I3ۈ*[Q&~cpEYb$M1DoMNlFbqLq5WF>]0׳1,UTiQ]R|H/؝84,N")uZxaQQrǥ^ImuR9*J3ZN saOD դˠ|`jXSDi-)Sm- 73yïvH<|.Q:y%Ph4ܿq}:!+g:(+\QF4ar oݥxt4 =>.^AMn ]Oysƴ LkLht1PDn5R 8BP\hXL(4Q/ɤ[jOuȘ$X|y] .7`Mc)sfSJ4AIV ZDm@L43(K vl&a0K7*j&'7/S-L_ T#fDއ`x.aI_Xgj۸_aݗݽxכ\Rڊ/rUbL )q_c8HC(P"qbҀ4eeOy`/-z奄]8M1? 9yk/3 )B#lJ.d ?yrgk4Wgqd$Zq`=!HtByt̮ŭؚ'd?6W>f)6 14됥O`jkC_?^xjvK9#RXgS+rIzFm8xj`mLMyfOl~nm`\2sehxq[fveEmM6wv|;8+''J'.6t mfbmc71Q8yJ>.'z~jxepMR zmQZ)<tĥϷYf+ 9`8 y`ǐ!f Q2px<%~߼w~?}7?߽UO8p' <yeL.]n&CrK-߮1aooù!_M؜CUa]\ ,?l_ o׹ַ\Pb? 
ж/\g:Ysyl,V}>:'7;Jg Kx&$8 epC9gTԡDT 9 gqXڼVߏqb'{;<}N"M H0"#6D<2m:80@TS`goO߻c͆b2@:n5>8]}u);Gt{to9ic^&IrsRZNN&KdKGnǞ w#rRue{|OT%,=:p ~jrYO*Y~\O x{ȻS(/˸MOnF^R\&8\c翠QO$V?{/i{wxeeoKRbmPbk[7mc{-rFw[Z-mG愕v-oŶw _1&,*E,U6,Vy;B6@數(*BW1]r>[¡%Ζ@ٱeKx%Kl V D`XL1zYp\$3@H֎jt虂(Mr#2oCxHF%`)MfI|>S(Ndz&.^7{Ћ0sq)xɒq8l 4 e5$ )¢%Cلߜ"#HԀd|I3-/(s\ѾtRVx1<#,ʡ8Y9_@u J~ xKo׳uNK {=%;u j%&.lb8bIǞVV[H|V1r-ElTiIsD(֠;HI0}i=dDы,*5!sI#^"p.$TGNuTtb춇Sߞ XbEz[ĭ@ˤXZDtyP!IF(8RFC aTQZDT+BG$8MθQF rHn)W*:_C1v:y(1u}"-lio{Ms !%@I@`:$Qbl4 CڒN \5 ;"9ս]<]<{XL:=dʹ_^#;e").?PQK\nq!KՎ9i1-`,SF=ȡ\h74W󷿂yyI+U֖ 9.dtRK F/_/e7JqqK *E P)YH:*r :|s6@ۻn/150aK~!9(g9d$%@*T"2XT`ݖo_kZ;1J;s6ý2%X\\`kk9WQFR8 S_ 뙱1$SI8ē\xJ<80Zks R%a,nZЯo:-GYy}S)ɾ(3E^`͇ࣇ/1F87wS"T"#JJ& Drzx^)kD>R@Ց&ˋUTD "8O( ރN|YI^,P lif,;H7pw"z|?:St5m'~3d1^SQa [ϭEq:Do(:tg&-+lNv>|x7&啖a6l|ݘ|kkA_=YٽQoq+/v",<.L?a@_ *ב?Ȱu2 3TJB̭Ǟ>sռOhIh/U*8<^c ʧtdDZR%%羢y\`QHCթbReHDh g=#p_zq/^o6* ΂SQ0wFT l4s*1.Em(Neӕ,)DR=4ŻWcf[6 ?-V?s*abz̦qscnȢQLVpWF|'I1(!p| Ũ@EbT KQG(MR[oQWSAu@v`JzhǦsNsL|3?iCoQ}#mg7Ժ-V m<`ؖšχDʎp^АXYQY-qV$Wq{O¶vѓ<'KFO'h`FIb0I)K9RXfF(|Sއ /o)Zu%szqܸ=bXpgXC͋`}^PJ r02aF;ISi8O S]O}<"TqOL3U"'NIPNm!B%W t5έȜ <Ęw@Ă憀Xz% %mߚo_ak>"_b yX6ӶA Cz`az+׫ '73+!$G7{^]ϦT陠xEZqFBeךZ VHu ,q/{O)Z5'm,$1AHs F8P!RQQ=9{X&؜/T+ZfKi^RqÁ=Wkį.%F3*82A33H @ߦ|р^(N2I!N6*͸gIg@Dsmȹ}@!zE /c4tgv7&\/'<MaZN/(jz_sFQ{ zpe~0 CN3ҡq 'NAXBTJIW贲dMkE ]pl36+hrþr3ntB1:h2 VY*q %v)boo).-A1eF(U>1\ 0AVTE^s``I@\~vxqgt2g6&73pχ4]<Bmdd5Ǭǫ.Tq&9E2%vF 0})3sԃMc]fjَZ[ݓM]&;QSW]țn俹y1.t.cFФ.^?C?G ,~1gIn;ZokLu4\8 o/ngvezW.̻߲W>1kP`cY%7uv]~(ovKZH4ߥpv25 1n>N\.eU̥. BGKWicGηmPsRDn?e۫rlGb|:6&2{ojO+w-a( k$e[r:6jruM\?)Ʈ:"w m$>luA~Fo +jGI.1FD*{֓+5pj89"lHE[Zw!c`y΢dbavy-]]Md)/zo%ݵ Wokg-}?˳ߕu٢z)V 6>w_S6h}r=ql;ϗ:AV-}@^Ϯ4}֐^|KokeM{5?<,^^lv274XQo-Jzۜ|,9uՈZm׶y3Uk=@Io!.{#^FfKcA_q3͂לc_Ն򴥣:q-l;n)=oe}x=8׌fi\C~IC駵&8h♠׺6onbǴƒ^W_{jeI=݈ 5CZcRىN^ѕB腋x,tв;vJ%]-޿-N 9n(ݑw+?c^Z]0 ]b,tj{tP7 ҕrRK="jy< pi K3jpUo\ͿCٶC S׏fhc醒D'Hyݨps\3$ ("]7%;AWF]5;]5^NtutGDW^# .ʧݟJ=%'AWo>zCpneجJ]g`cfczv~ޗz4]yzN:?tH?}ϯRx^>4"]߭NhǗwhM8nW70Ձjom+J/ъ+lWl~wfD*̋~l_\zu/9vv-nujg)+-Z3 G?ͫӗ񅨺:c^?=,@E~"q~Dl4'>o?o3R>0@1F 4t#/~޷g_JA-].J/Rt>wA:qtۢiY`dVQjdMM2hW̮yϗ-uafi}]&ywaހ/~[/n\~Kdj5UWdE h+A2ߪ8 ף9:b$h}t](CB@lSFb*eƁYRu"ې{;ɛ:[*R|gQ6LTac̕>;cq&PE.JC%'9:DmDKPLvHP ,Tҗ1J:Hfd5Pc:xnDe] dKrTںjQV@FG()SyS$"D> 0l X YM}cREG%QCegg{@x%<{} MJthmk݅&aM*: JX1T`ߦ@p9Wrjrm& \j3f!V"v{@xב ㅧGE̯C*7Y㐤JF5Y@>#X`,H>٢!D.kl^%V*JuH(%)ȌU{Z\2%dbgP[A4wK 2M7)p##+V &6H-r Vؘj) AJdM`!>Q`ZbsQ;l" {i#XRNI3˄>$%Ef+ke熠EgaE†tA ڳRȪB"W K"n{0L!DpBѸi&lzS \DlxH v *(:Y-Y?ehm+>vV͊"/ JŒ TlhsJ܌A2hFG-S%`*)n G%l ŐZuvjFS 'FV4 Rc#XaҰ6k s1E|(CE Hظ\s( + (ԠR)zvC'Id-t*X$0V(UM@F3 l҄7#I!GE P ig!*#2 gQd#X^ 6=`̄ Kȸ@#MAB7[à@$rHBB2Mt R_IJDq9Uƨt#X,x`&`_ !.A DITȥ[C&À:3Ĕ  fPFȤ}B8ٖ AP{S4Pq 92bFj` YaH +:~;)mFbo.5TuA$"F_mhbՈypթB_ApBi5$Y(m"D"S۝@V^qq1&dȏW.űF6 lAi=%U/Fإ·JIC1Dj$%!py_fllՙKwmߏMkvǟJTms5y:6=X G1^,*#҇ f%kOAҥ`sAh)CfMd;FLE8&5`QEx_b =G!A]J7 ۶AD=LzJ88R M'^x BvV;V:)QWjKG0`,XFXKff%xx9wB!2l.h4pQ~k]0B S Pz c ?v^zRMWtSܰ2VB` x*Sgb 8׵ߋH/E.dP2bRY Hp) WK=惉ܥ' 19_\xݟry9~̫tQR4l!h|f6ou.>g sNm 7ϜhnlTݭ#q>?_MlAݺr3 Ա9R 7_\!;n?W'* Ktس6psP>G+tI* "%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R蔕@XI ,׮?J k}o@BzJ {Q@R@ \\OJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%q+O t_joR1Sǿh?hO½hOZhOVfJyYR=J $c@@0YZXݏz6w8}S=GӐ@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ RH>)QEo@PFJST=#%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ Rt@ߧփ`j~`)ֺM]B_@cΩf9~,Enɋ)\?sen0r3)O /LX#^;jn1d1cZz\=g֢Lz:΋:.>*SpTJ"}4ߪVZs0@|fG\2vBǖOtЕcHB+$tz=<|5h+TUw/ΤDE<~;ג{8_:]^&]$؝XX g RS|rQKC/f߿joüM7ܲ ;UVJ<8Be,YT{{혭啥9q p9CR8|3Mp}7}LO'`0GӶ"m_po16^t33]tcC*h?܌?-*V~Kwtղwn{I|P QGwKNFf@7?Xp6Ĥ]5Dlno)ɷXo޶Z~v op*W.X'UVeBɥߗe2,fVfJ9Zo=zf/ 
#˼}oNx+`;feßEi}$>tN [$t.[f2wtA?7LJt|s >̮7\YQW^a<^7r `돣a31k(6&Z6m?t^t?0BU_gf&Q\ܖTz0?tcdҊU46 A$SO%Yᔒ sNFnTrba0&b bxbP.sC.{|c4\ K"%nU%ML=b'aF9dd1&FZeDQab$fkm]z['/l(qE{*5R-:fxD+R)jL+=Cʲ2":S2 J%)yd| xC">%YYXIe~Zlpz^Nѡ#Q0@(K q]ME=sӸ*KU)k=7`0֗%e^q•*Dx2ϮF`XC\pR + 0R o SIS>9XjZVFT<5`윷Uɨ΅bgiTz:ŻZ6ͦ7M!& Gj:ʳz?C'a ab3ݵ7*Os]9SR);FZ3 4'p> I;4u9d]ls]fjz[{XaI6kb|ev~7oyRj堵ۣ*0 ÃAkoi{gS8}ŐیX0awd|ÜlѲnG,MsgI#Mm&n~l1+voŶx֝nGA9Eʃ4\+%۾V>n3SA8tb';1@kY٭=Y5[~2O3:_?j6ԣkōsGpFƘM>&GQԻZG#ہwyPG?C )nsk3ٵl'/ Yēn1=F$|g|.uĪ% i;C[8:yT;ܣrqhfz.MX/<<+UMJߛ!ueɨV!Z|fDݎZ釭CIͯ<_Mz6ۦ]g'M4@]R1(vE 0qukwH-vd&;-_ۯEؠ nw-ܪg^ :!QCnK. FfVۊg!*h-4*@)R~q%~ %~(\'@x.溆Ņ=c{ 4?sVf\[pTݭ>?_h7?fx@?rmmM'͠=Ady$P5" ]0W[<ς{hbT7a71ͥןpօ lXR޵q$/~܇ukSBRg+U"eDMǀmCNWW]U i2D [k]օk]<.ԯyQutCiX,=Fa;m}*T. >~j`9䐼'A}nn>^dDu|(Ɠ7W"+i]pp+@da$B| oБp=tpv O!jsA621K),]T.+g* D0%D,&GW4[-Qu )yL,+ "#kTF /7!7k BAGC!;fbi'cR{a:Cb`KIDeHlbEc2 ͂eJIAsҗnƃAg&g#w7J:N(EM+]p:*ykQxW Hti }/UwގIvb6Qa+OWrV;8(v˹37\xN6b!A;'Qi'# !Ll<(\jBJyϹN{8*`X>b 戊5(YT&*cm\-3aFzm`+ $"dK*~ LZ %!2'AQAMLGf=A8W?V#gzd$ؐr?\¸eڌe;[b<-$kAy  mX͎=' I-ZpU] &R.Б/EEY>W3yZq2K`V3K FV2s%J̒o0D®ty<޲0PN[6R6>An4/1ZX7>SVB#J;a3R{|: U =Ž-Y/=|T]]>]^7.V!HО2e'P 椕N`H&2\65 g#G~,Ow[><`ZHY݅Y}r66s'|-aȍ:4F;mOw;Ntލ:\"d2Ź7zM bpRh饲&)Qgr6(??U7I$ Eޖ RN(3dzc *oU#gKaMfnWLA{S( hh]w˾o9Ħ+YI3~Ɔe5-\hP;H؝; &ND\V L&%Lai\#2G_ui){l/wn[HM%scɍ.)hlܐD'A@-UGAcP7p.;x8<.au-۰?Dအsư3frBӴ LjJ(e 1;quӛQLV]LBG &MEx$I -8Ct)9'"C3ȦTWvi H3ܫ$=QA Moäq[$J *۫ o@!a~=Z錀6kAyAap=* 8`fƌ$M2k^~˗0LJa:|jX^ic./y0/iso9uh/ʶ$/ןߎXao]rk5 lQOl .nRM4vEUL !v]~,P_-^HAKI2{2X `Y&)0l 9ڠ@I$f["pSo{R5yA'ت'mn} \kywLi&-E=y:¦J:Uu8ړ};O|mOlؖ/A8]ww<}jyxΤ!{`Uty}f}-p|NNT9SˬRӍ)7֑τ)L,n2W/by.V[`ŝp:_i=8TƧ&.=/: (+p,p0cAn&P99;T3{u΄<&C`Vy26eh2S$ `\KϹII֌y\3*ta5U^(]mv]qGŶg7Ӹ({Ͽpix1r)u(HeF6C̹tT\%F rKU<[#5DYIZ8{> &CR)!8\2|;f 9Cp:G,[YcW#qƣb֮;jmjmz#؇8>GYo#>tHMcv-(9:#; ^8?u*}UC&dȁ(ѐIHΥ@a&%2c$]9֩_v^*X4b5U#׈b,A!,yyRX36ZV'K@ aU5"cƨ@r2虐B5'D`7jlѾQҋf!\%EQY/^/z!/JkϸP.s)sDØ#g90WdL#A^/B/>CYYTتLgxȭ*{61w1[׆\N?"3^gyY`G)?paOEheQc<}N٦3Xy#>-,zp8M]*\E;5~w~( F[toy-^qoYDF<q9Yhu@\s>곽X9ӳ>,US%ngǩt㬡vSViӳ\TpMxI"h#%sEu4h&WüJ!{=zl+u(C nEA*MVxNW{0&7~[4y'TIIWHa[M}kmƲEȗŦl^@>d;M 0$`w >meɑ~%۲d,eÒJa<%QdY+c4Mm$`vs=ޞԶuǞi{^Xa{.Ex"FmmH1Y%RZ@]p;5|j8H4$CBK#K$c\(&I(+}`\dؚ;ɇU| :ɧ<7.h2*{E^݂|d>xѥfrF0oEnTtuEW/WW/np5[@w.E>v{|{beW||Wf9g w,JX*ϕe9T&0i`wNI{j};L}/,ͫw;:I6o5h F2IE$`3I'͞đ{ 5ʌ) 0>&E,m2pC Ld*0\faػP]Gl'% ",ebxp9zk=CT!Ŕ͜Dr.)Rf<m(q0%B5Y#j!y4YZpIIp%9A{b<;Hi IiVW0X)Io+mQÏc8c)+uR+tK]_FJͬ- Dk` \SJsa0媿 y5a0 $[I±6!ջ޺0<zyzaGJ1o{tv))HQGV@a=oϔsuM]4 \BNӄRhдֆ:EtE G ^? Zq^!]Qi/ڷUAN޺*(;*U+ll{+kyk誠骠ԼĎ]/ΘeLI9i]= y!1$NJtto׃EEtV2WUAܶtUP++nIZi*pmk+B p*(:zt &#Ue[ r5NWk+Pi]ʴ \#BWV?CW$ 0h ]Gk; ZsbHW ;h~@ 3o90%6G'ȶɪ$(ekh.{:M+Ŏ_MXKttU.Bk誠œ]B`,kuUpG^+ &שUA]}=t;v=1 $y`O^/Di(͉>}4- BW-NW0]BUlLk>{HDstU qtUPJҕR5ktEEbU-tU8u*(O- d+ \m+Bk[WdUyr}nA`nM0恪eH ݨى5#zaiqy컋h2:^^LC5TIP!bj:oipvQNFhM[l,TFzP"~P5llimº}$%́V^(P JK 9*"Mx1+g9}`@>e 5&jl1ω+e衔?|ZB< @ rtN鮓=r2'4AsKo%+BgkYjY%:*bJ#cŕhg]LewOTiQ%IjLdqbI C} i3AaM\#ƉH$-'{YHE#\1oӬ22sj4_`0 0XfYo[3QĀ! #j8_`cp KH'(p>-|~~ȩ=DXOWԃv%űKX=|bҽݭ#^rusJZ?nq(e2LqRߒ3:e#ER'#3k!edg%ŽA (@e4C,tɔIL8.IDNj Krʹ9WǶi&ßx]  AU[sDёqEˣ0V13 ^n62y)] ^r篚 5+vM<`v r%΢)}s%qKI '9סrhR8ATX(t@E OK 0 =&U5Oz;{),͋?`Aܓ˜o@zŅՎ0{21',&0FG܋ u1e䲈 *'yrF$0#6l?6gXcBE"1N+q~n&p9$\sb5<}?hg}x<M[v~V/@3 `Yտ~;IX]7tW\_z1QWn|DWt%v$Ԇ#+sDA+NRu GRr˛\ ŴHlh\T>W,[\9PҊd1Ȥz>攝B{{Vq e>S ѽT!K'NcDlA+8I6!ҥ 9= "ՀQ˔TZLj, tPxcz !K:Q[ѲGATK6=y7 ~hI;?kI?b~EVl(|" ݲ_rCβ]Ӛ'.ȒZ9esLRq벡4Xz&JIbRB oZvH`C* v>(h(%xzv88P)%42hM&R?tYߎ'cԣ.Q9N76E@dA(-}yqKYrogv{,// iu&XD?7Az7Dx.Ҵ7ThRNDLd\70k}WS[Q3|sĩ#"ϠIń6e"l .$)MLd>P&4\g7:\T_Cccbv4Y Ϳ0/ , !OwN]aoJו1Ym5 w.~7? G,ߏ{5ْbw[O״ЦWey5i%1ݴrk-׊6-,k_l;Ke,-jN ;rqʻAۓJ^ eV* TT΃OB;$ 0}Ht! 
d`E)"zJX`m TUlISr))cX9D`@ b:\ ]msGr+,8=*9N*UWW-yhA QY,HKr@,KztzqtꅟM,(Ő"0~ d,$3syF@zL$Y(B>ʧz<(ن$g O19g&\Hr.ךPu-uoth-MƓ˸((af'OoGo=Q H\;U[AI?+;GR<[M/VPy@r )a6-QI W252b%Qc޳2HNГ!tɣKN@  eXMXTjT,#hTŒ{-R-_-hd22\^qĎJLVDeNeqr.9Ȉ1NnBa0\ݶˆt"g{.xM4^hQg!6U9̂6:Ϧq<ԮfODmv'oDX|%;U&lS6ȃd H,YUYH@D҄51s7AqL93m@NuҮvj֩_-/x,X>PaDwrIB&2w!C7ZzMDrUJ :TEDV)'lVDs}0TR@*N22"VSg'\\6:Xg5+y*.ʸG\qqŗJnc&fHI3f]ޒBƱIzzǂլx(*xnZg>,kyJw^G[[=4U'Ipc+e? n7LRzAnMW짍m{u=4>J񞉳f9[̮VK#m z4Θz/{fIDFԼ_7w[Wc^07:,E>{&SC^W7[ΰ('lUՉR; rرvRжqJov\2b@@-WJl2gVap٧HNuPLa#ɉ[ᓜxp vu{% ռ 俔R7MJQUp[XvFFm1FCS X$`|t[Q(#etV&Rhl*,T*!K[9DTbu? )K+Y邰.E:kk3ʘ `1!s5Zi&20XKB72̶=ԔZ/ԟj0ɬW\>)kQL)ͻ$ '3ﲨx*.IۮU1ϻTq78R)kjktƋsM,5ul h\A8zã70.b`2^6lytϖW!~|2޽fEK@UrjYNm"Y#5W%퍁4jnt;=Bo왅zwV lhXa]w+wb6!o}tȋkbܷg'|kފ14ח^:tbiM%É2,É/Nznc(BF(o><Ml7MFRyZ{2Ln%}n,ˡ1I轿x qt\ǜtkށIVRKn.sTDWu |&׾T%sveKg}euj$Ѿ/ӿd?^a9=靗Ճ?s*ib~rzB)3ڛ9\R:%2h'1x *Qj҉s@c*Ĉˌ4-$=HL1 R߰n`f.cS{|޻7R{\ffy[Ln,:o}]5ybCYO[=_Ml܄ )N|`UÍ :g ۆ$)'z'+Y[`BGab6HՆ\؜k΄h71Jz]:i4v^x$˜Œskո!i`K~ 畨X``ԇ^$ft_ua#=b(ms?o`n00xm, iC{[3iɏsI%ZZ!:I.a*,$mĘR,A@Ia"?H~D`I//_t{,Ǜ2mGJT`TocEH ɍŀb>ZB] %6d`z+[JfO/OUS`u sGcM dFN ڨ!0dtLyGVLVw[ertj5ՙitTD$0'T?O 򓩟/*{*E[~o~^kc^*G}SY#dnXֲATc| g9b=Uӛ#7\MhN$-NI݃C$G.ED(Xy(,L&i&.R Xp^crw+R\vx\UCz?br{~b J;v%_<9f ]IBFFD8 "[^J2C!Z/T:yE{G1sBQעt祿*%cS*~D~漿FyÇZfE\OCZMFHwSzaN "G@A2TIcJեϊr[W:1h!0z}2G<Շ8BO/֐'Bl #2yͣb9k+8&g%RB2lDZdbw^̑@p2rO"གྷ'hmurvB*&ȴ .Hrp;0j6>nͺM-zPQ82OGڞݥ߯ p%+ ^6p:}0nM&1&䴶,SN;,g 2*xh 4a/EvC̯g>g޵YGTn'%Ut3w6JI%wD>H/xGhHyլI|׳^uswD66G: i6 g߳gy1tX+mB9vQNj0)m<[|Jy*C0sY2Eγ+.jB߽aWrW ɇ.XFiW%a?M(f&=?!_]k0>r9PV9j6Hn#?K>J>brmveÖT-o\v im'NԚK2cOÝ|_~cGA< ǻ|ly֝u}ˆ~HM l'r>ȠwWv5"vZ?pmc:s35wO#;Ν8 `ܡ~v*u1znР5q$ڵ\I7uw=pFZģmnxe/JԈhFl/yaF䢄DDP RgH`dK28LJ*|ݜ/W?="!,XzzX вU`=a/-CTYeD%Eb!_4t?6]6Blp{^]ҧy1' yn~rwx:XW!٪-_\. ` ЦXbr nԠB&*cq+-]0jeZoYP "xdDiL \ 6I3^@ Mo61H:$$,B2x.dtGvQȕ)n5u$ⷸ2>Tmzwd5Z.w_=9$H&f՘lČ|u%y4 +Ju* ȊZ{7؀*Ƹ8!"Q듁S+Vn[p-•Z߲.m#ǁ#̒ Ww~H$tyC2g&;:3d{(zJ.}ݧA\@c#]&E E*/@0G5&" 5!,*fW6|Kl}W.wOwB{r۬$7 feoPIKm5FGV#("N2T$ؔer*yl4C/MMCI%V7۝Kl!1ž Q;2ӗMj/Yj;\Mf}&czWZ>2R)~cBW5_W*1$RZ@L#H/^Y rpGI<+<)J"PLTm10DݜaKI 3M#bYVӹsY\UkyoXN0^WMͯvSu|=Ƕx+N#5Gt.z6jei-[yGɝ횹+DhJiQAjЯ ],5:{ͽvS{Ouk+wb6!o}tmgl~6~k{v.}M7J$\Hqm\_zn* -"˰r_?С蛞wmmew0R_oMN,1TK)Rp^DR"-H̰ttsp_.Ȳ!V\2ZJ߇-hJ;&͕t*+rldFɣM/|;E]Si{}<W 7 2JXBN@ɒI.k$?R!*%?ӽv+fY}Uo/$REĹtĄof=ju& bhZD&b.ldS5rf07fU5S\ot#Z(Rۀͱ#v )i)jQ )P;.`eE릶BWMG7IǕ'l5(JX++,TQL?\g\`\'%^V7k߂2d s"Ig4sQª\ 0%76& F} ܋[q/^B F+='39:ȡ5յQo[km&ijGԌKOЇhzK VVA"}m5fOH0j~;zI/M4kzGz̓AC~/J&bRc!B5KFA;H*gTZUI=U^қS1z] /5:{i-AH1{e@%!fSP3Q,׮l,06J;HuHŹ2NJBm2{u@+O΀aZb?98鱌"qw`̊>t.A:h0," lpzSz')[TC /\x5&;jE^_WX)/NJ& N"F"Zq':pP RH8$TP`0BR1Z VY]eKzrj%ixZOJY/[(ܕ'#.f8@Fi-Q!k1D|IAdɼO;gt3Li=ym'b2}VfF$r)Ͻ&*$PR-$O[DBk{ADDВ8?pC@A`ĔuBBQpY$'[{z ̺ -xv[p(`Apn?ȖRB0졇MЍYnW.Ɵ[㻋&o9ϵT{ۜvL{6.C9 89٪K&'33^t ~u"ܙk.gh[4i?FiE⬆n ɨṅx7-n hI&ZyѠUoܿDs"ί?]AIqތdxg_RŁ7 SIbēV;Faw%?Ķv|)d ]I}\'LM=ERxv ۪ݦ%hhxp(i|߿hڸ{z9DZ%ֺf 7C!ܬ8&ךkfX+)Z񈇯8pJ憭ٯ?qi4XETkrԹe'ܯ%kÁtN7gY=x!ͪNK%4LV)uhI1iS2U(ٲżi]Œ\цk U1k| Q Yrkо(9&+2!{$Xc%RUp}^8-Ep gJw{F) ̢^i%݉D-;E..xetyZ(%XoG ү1uWqkb>f[\]A˻'S E\xk3GvG>Lzq-I_Ǔ/D_M.k!<8D*WSB̦Jp5n <>PXc9@o%wA:fE: ŠP1 l' 稙 v]%nmC@I3B]H$vzV{)0i[ g'Z./-Ϫc:}*CӼL/s\P>ZW42*VJR.+.&md+ȕVLUV\t(9KU0/]qüX sD ef'c.hcJB`RRpUvGPm agVOߋ9o$9 jVr7fV'TMš`jƸ['7 ipW|?~ -#,IHViGeL J8q$΂ lFYEP2*i3FoHF81;m娖dMLx E1B85?|Ҙf,>^ιc(GM(>+]p:*ykQxWڥu}xcVwNw mj;as ]r6N9~_QM2`_MMx.[_KM2Z^R&7Xc\u%Ml?Z< aq (0ʚ 01Rst\ҧ\>W$|8*Ra-&{,WA gri H]C:%x%ӼOͷM3zSo'DTbR $-D*ѥ䜈 y$ʠ=cvi==˽JIҊ 2%o&Tu_T`sڣ7 KϤ3j' {?HF|X.pէIgp/oauwjK&Kr$O7 O2#-*~pG}YqK@10L] 2xBB ݛ\.LdZb MH?~E},TI֚'L=]m:_z׾$Yrқ7KwiM99[=E]j4#)v^qI[|R5[yӽżFk3O;?7N.g- !.׋\a/.s+.HԆ?A>xw釋XCOƞ@m骩܍b,{bĄQ-h } _=՟`^+[kCoq,_Spbלr ⇩_ML^5QQihbT*"VYE;I- 7']H`)eXP5hKvё 
rKg>u78E^Usht8<"̵VgĔf,[dѣL (C+l Ub;i\E{&i5?𢽝73PљgXչuG0Z,GG!Mq?:'D+~<N*)e;] Lt I:RG{n^l(]rٻֻ،,ùk/㤓zhZVM(cu?_b>0n{QR =W6;[6Mr-}On؀h\nnr{,y]э{!voMmA_K`mnO$ XhW|~P'+u1-y+@iBTu{[{QB?k,GzTYp'1#F 20c8JQGTF:5 Dz][o#7+Tw6$;` AZR${:{X*ْJ2eIJҝnb"?~<# oCxHF%`)MfIr>+&N筐}N ΄zQ&uTPKı`~ 0NQ( &ĵ$N$_@ HG4?  R98L9"8\gB7:18͵4GwO> {1=9.\|o]^Q'un@.w{Ζݒ/VPr1!$'&p)ZkN]AQ>q Y+3JR9M@Ā"D & N[,6!HFbFr\ӌCXxQXx/Q~$I]|Xc44Mj̓hPpqx^C4@z:iF#719h g!i$ځ-i8C6le4&XLP m;9qD҅8ۍGa<.6%eQGh|V1z-E$lTiIsD֠;HIxDN:22ChE{bMh\HHcz!օѣQ-886i30 "ӏC #"#VeR, TH-bV:<$Z gMU)ZQe 0k D*qIR Ӕq52J@rŬ}ӡmĩvY\tYLKEZi=.n-0 HL1*BH[Xۿf rG$ca18Yaq]~M`ba-UvG.t60WkT{؈Gh&RE2W&b&xI*S^7g/eaʎ/+`n߿|k}TGۯ`%,+P©#0ςyGsGIps=哦ZA$YCŋ"F~Y>OzwnՏ w:o⏟=y.38bޯޏ?{)1҉uBJH,8 K T;yn(>@=ݥԜ?Xyfoa`Pr ׀mM&'!=?_y4?_=vfjgy=w[lBW2t\vn]˗?_.?u稞vr\'81OZu4p^_AeOzJ9+_ (V%%!:!c$CRϣ9SW8H8g(@94ؕ ZdQn'.Er(NeX-*=U F/>yww=ήWcH[r1YmΥ/33Mh.} 1\)hm*F)h+w+#YU$)’E1(!pr)ŐsqD".&E|Ϻ* s^+ u:^i}ч͝2!좗}8cRZm:޸Go<{Falm0]`8BS,`dU1ZAKI."{Hu"y[[ :=hĎd$D-DD#(LLý2Zc%wk\ZdOFR6!3(czfl $|qAn:%<%OMfE44-&nb]!Qاr=?q՜K'%Rvz))(&7̑4 Ud$V\PiPpHnCO/{z3^piu`4huI∈AXE%"$yBYtVܴ/Cިe1/f>3 n]7prhYjb3uA1Ouuh{lGm[+;}4e-wwmxxw/WWZnׇx[n0u}w*ܺ S %;W%i Ԋh(7XgػrB(|vH!rZLkO  XHE);c$:E}2 *Djv{o}V TlxWf C . a6sl!5 Aqxo4z7A˲gYH >3> ?c BB6lk)n=: B~hR.Q}=u:>}Η{aYwƠeIGNPpmfܳ$Z3 d@0 >9wt9FK)ӭ9g>wng q2_~_L_td6=`YsGmG,ŕ" 9pDs= =".?.IC / +NHĵo#a Q))&eJ\ʲw2 tEF.6#~v4O2yr}|[bBDyE0P+d8yp:$2y穱9"s1r\DH#EX\hdQ9(C"jGAꋭԠ߼XשE˚?JLv )*mݹ. Y/񼀛FOWƟBa[l<=7A,.iv||L/E0xS]$ƽsKyBŵu5). ;tںԴSߏãYKF1 ~0)>›J 8˔4̾`vw3{PCNQQɻk?Fi<}b,{\'N?Ρml8M Edz qTdγMN_MY TNNJ ]ͳ!:<1mm=EMAKgۦ1F=4cv4\o 0.+,sIk>n>L\vsG˜쌸\kb-nsT}_w}9p}l*h8 ױV#^kYg$O>Gf)T*]!wqH_`wu3^nU&\Of|nmv6摯?vs7ý9t&he:6dAjjv7&P-$F - N8hmg5sx簇)91y5mo%;y7]4nQi"5( ZSƊP;6mz;[I#Y1];eOWh%CC_Q><7D+_\||T Rb;ci?T8cL+vZL7N_I8''un}ld/ 9S:C҃>ũ\HXܪ]H,"S9RM+4sМ䤯 +4hвxN"ͧF|,[nb"!ڊې*瓪Pt)+-9Z QBP戗0-8NIћ(GSY$sڗ 4R@k>"mBD#8D([i%1H [Yg9?YZ+~#Ȑ觋dn^zn.ml^'0$7Od^s>RGIˍ?Af>^My*;qӊGD-%,FVTkԕYG+w'=q9IWAiݳyR'>1F3JI(MH2qd"|2ƗW zK>Fπo.cQ `x6o=9Ly4l只Łf/OˈBi1Ȓsx}p,lӂ̵h97ٗ"*h! Ɋzm`+B\tBQ9%'dU?zrV&nۮZ!So ۜ Ӌӧ;$0h孕A+t(:KIsRˠ!ܐVU㊴E uV'̣oѩI~b&R̥ rhUkjZEKB# }6SR0OJxRz+0;sFKd˔@̍Uʌ?39[>~j”ȇrzsz)ZV̝~E*҇Lq=:ƝM"qw27 p27 FBWsxSp91Zi_C[Kerel9 RFs& BRorZh8 &9ex"CβG-QI y@rm49 w}"*H'ޔY/lf3|&DK=&GxP6+]p:*ykQxW@It)~ }/Uwnl/޴>YoyD_2YA+N5w<;^ގٟƧ64|lk 0r0cAn&JA9>PяoU* $fĨA&9l"Y)j t 0 zDMXVB0"eHч@$rA.9̉ީ U6V#gGU1ޱJ}X%D_Gg #.,g2E$0*-eFBХ kNqEE2-3pbeQpa8&,UlmF2 C1|%MK堋jpd%1hl[٬831EkW`uyl(;tу cc%Ggyd9zUaVe9XŁ,dBȊ 隄d\ 6jR&qdT0V#g>lFdQǢǮWֈ׈F|XDe dfu Cγ T:YR#LB$Vid]ֆd@gB jԜH. ^p%_ "iɢ1:qɮzQT֋׋^|ɾJkϸP.s)sDØ#g90W[8GL^<^<}X;vՇ>婗+&Slȍ?0YAp}3E?*s8=OԹq@؀x r09(١r~g{OѮYA6æ?P ɏJ?Xʪ)#Ύ-GxAs^zhvR=XUYm Pd`,%M![*:k87$IPKU$ޜ!(Ryl%1Ґht\[X:Ϙ} gKig9 Դ*!&Ԋ=? O>cO4m%H$t$b!$IZϊz/HЂ8Ds"28֔^ Ngзg`W)IQA MoäM:UXutF@[׀~ GibF>VtpM"o4||Wn*0&-߳o>w5,>LZ~<%}HdqjC&3)F[2S θd욝-%O&xF&SRg8r =gJ c!AL8ju`Ų_G=ksx9>9ɡUouɮVꌵwB2)u\HiX&GNHAg|p5衎G'~=l8]~Ǜ?7߿Ͽ߼¾}߾{ZuF#0Xw$s/ۃϚ׿|APijoѴMӦfErGi;No a\IۏߏI,>-$]rjU<,qWlYdh*UZ*^VX.mWmGmU4Ev8h^$ɿr;IfTFK,$>G4ӑ rK7g>'w8,T^J_t8\(h"\kywLi&-E=Y:¦iQ%f}*:^9j<9 Sy;sʝ;:]x>tyBwb-|LCgj 9ryzEeT/՝mAj1;HrUF(f5AGJW̊"{ͲYaʋmқ"zZ|ΓƏDahj%H!&Ji,4ѰL,7]tW7i<4Wꄽ{V+ːmR$Er Ve1ۘ2<I-ntJC9"U,@f%E YTLFZuvvю7|W4K}vʡ=n_ $A] hhߣ9c|Y.BG F9Q2Z[Lq%HH23WeJ:g̩<x9ǽ Ekw/eLm}>+.2fĺ͇~+|).I}> -6{i={H)w:'QFT9-}~`"҇щXJ{H\'76lO'65LJlnVc-Tj~՟^wmԶKz/> ^YJyDm']2;L};~0v0>\]r̓,ewRcp`D'zc%DH%HǹBI$EI"n]{fr"_(|Y# KFWu>}T`'6U6" I#|Hn6!K

yBB{GCe P 1TD3:HY̑GL }%Et8dehQR~;bّgn^>(f9S&GsU+. ^ P`B H9IPt>E?uTiдI֖*Kӎ)mRe ~Нˋr~L̻@ſA{UQ uFqCT1c1SUmMyѶ6q"oijhqia6Rx;Wy`rnxyh;!.눫=BL)d|QLgSAMi?M]MHbLnZ]j * /o-iJe"aK(;uy[}]0Q}\&[m}N-AKG1Vp \W^VRu-T ?h?n ' U 7Œٰ ]W[%Fy81&z6swR/B1J)f]I"S Ad+!9eG-1sա]N?[:ᵣ6FxHIBlК.F{0E!RrF:U՞,M v:l᭟gw^q* -WhM3ۃ쳫;ÊmhG y|ڢ7~Z1w߁r9#kiB/=gT0US:R$5#C<MBlt& NU2A3kEBS{<1q#>1qg/˗/[ .sP%EAU[S2:#96scUN/(v"vH7hTFCKt+EvgM]r6G7.H |b<*4I1J'F 'KQ8Z8OĠaIq2ju9KC T‹W~US5ߑ%9e'D0dWRV1nVGu>/+lԠQ]s0#DuRAUh8YB xD9+BNV,\xa J9/]qmQE[ H\LEnjAno7ftTY_ Ni>jVϊ;"~0+ Uoj*G@Q}̐Dاwmu;Ut1^_Gt*yAvjUķz_ⵓ`[,3};\j?VgRʥED**MǍlr-Lo+{ ."mvKֺF$fsmck $G.u3~*3zALǂ_`Ar%%$ۈYIϲLJ--atTR+1¡~t'Dt={ғ$`/ZڏZ3Ow2I'HJQGFg)4:JAjKf$䛄!6cRkHB ;oxr *VS|ԖhI6;#gÉOs+ gJ-g~;_h?g~dq|~yeY*MgN"AZ%7h‚rm$U$"@i4|lY[1vAW /KicZZ6LWA 'Bk4% (.hqNJ-O5C,j_ZXw՚D 9"HŬKWXJRbpW}$ JՑ_7pښv$jCd* ?#GiQ.IobYe [A'QԾ:#uIcBTr!L^.qu;p=ٜI{6^(c/iǭfuFyG 5&fQ>Ξ|1|4`\"5ޅ<97'0-Y{u0Rx_&Ot0T^-rI8_ m_^q[Hu8W*]6~=㟕ɏՃWUl1(jA̅8켞[ns AV||tvzF[AHKO֞E[7lc71h\a`]hx3`v82rA6VrUs3!0+9ZO{Q*wp ӨBnl0apyկ}??~߼}Oy?]+zm$Ph~y<;]Zt57K׺Q7Ct9~"+v{k%@+~zLC>t3ZETE|pXW4"%4MTTN?YP.bЀ:~@gelvumAv{ҰDNp;'g 0$P%n-Pѧ`ZpH"%.mpC^YIϟt8 "D&HE6YH2@DKS7 3\CN':++)AUtݰ [OKy62hY J<%!4sZ:OMIpB3ٷ꾷?FMurzk#OΪ&g[ZJNݒ%q`C r "-*'6 %FJ2 ^v]⊚992:Ř'F:LL2J(,:&%4DEwJT9ۑ;]3,3B{O ͮfܒ~Xc4&ɯ_(p8Z3#v+)U u3r Fe$f&x꺭E O!{.\qa\٫ 6ꄶ19 ª MLj9ۏGq<;DmFE#j5O j"'T &d!` $]n#9fe ,zv[%$WUWLI$;u3us%[`0N0"麳rV=̲0x# !gH(:D'n"1S/i:I]~UQ XjEz[ĵ>QY$ B&2wʓCY nVUJ sujiZ146Nج4bg|`RI\f$:Z8 smW.N:Xg5-.vv4J9\ ,Kab4c%- d˞l߈3vv\a5Pg`žJg1}jȭ*{69 |mŔ`s\xGk6~\_i[(ءΝ^=.J!tu(rC:#J` ;)˜R̥&z\2C@@-WJl2gVap٧H2mp&-Ʌk}@2mdFFH]!BJA)0> eg@jQ` ύ1rCL{xi8wGy@G-6%M"dK jC>2.hG=>S4$݋RȒ3gŘg2E4Wd>0.b`2ldk\=깴g7`2*}\+dS[1ypzo&I r1Z7ԺiL[pӧ68AvH-j9O߾uEύ7w>+[r>G5ͯ~x6:i9|Cdzݼ7QaxRMex[j,9f8Q}8ėy-rC' %J>6}.kk;fz SFbh8V@RY&ɍ+frhL`DգGQ{F{RkށIVRKn.sTD«: >/\lYO,N$S!x~+׋9pez7fon>.GJ" "Z&Ƥ\(:dCXIü?JA'aZ`6CA0 n07Emjr@{ˇ( )BR[oc7\B=}lF9?XhO/m7^ы.6 _>lC|(/x[AT CK,h|}XBc.  e `UÍ o+z[86ۚ, =ۋی?Hy<_ۀ`];pwp}]J8Cd4H3fլccGmn⽺V&|~xm,dש&oH߾^*>9U JֹPKkR2D'R("cJ%$f ZJ kyR_]%+ЯW~Z}sa5}2j0*@vWԮ,hM-j[G]ݿzwpmQ˹u^jn9:N0^`TBC~,1fzBOO*?U ~-Ab]z$7vX:.r洛_;I pͯݤgVBͯ_bۃ!xn"܅}3HP!bq0ZBh %6dS]JY\.0K sGcM dᓦUĵ6*f ٨hw;uc .,K.=<4 E |x1ͮ6 t`ks 2#"9|."E<w, $ؗs ID΀Bb>!zӟ>)vQz /1O]NS\Gv6]/Hd9HLh;*x9hVD+Gu@r{J E8E(J00bhR}K>B1J匚@(#P 5k5T0u @.X^ yv]GCbsnJa .đ wl9b JSjx0*u(D  O3?t7˓{!Y6yͣb9JNgm `Ih/ -OT7+{6ALLЂ>YE~O @h 1#{. DX 㡟f)~ҿ>Y]9Qq(&vrj#;Y9BVj2g^vAf ܌Fow7]bGyO)i%jQvj<\qN7%~_ᚴ{r}7̣4\ߧB4'%:8o` ]G) Qs,C%Ղ}kӲf:>?Efjw:gLzf nFGVJiUunqq* 8d7d.:W+iBb [mq3S>8:ν|or`Π\ j.n[)]5#+g ,ҌoNFbۂM3j6Ɍ3@%་unXe!jD2H;ˌvE NrN;C4\K!qR%$d|ʖKlК*Ƣ{pżaR ISUW` O,*lo-w쿗p^ixz VʺwV}{E`^q7&c;J s+}9W꜉(Ns" XY2`tv^qac)'8gI$f㒲\eQ*kO"Oko(/կ=孷vV~1x FL1q%ւ1ϬP+IqJ֝$k\$K\L282#'nrr52s` &x4z.v~j}chakpE;Q L Ѥi7=Zj@Y# nl9Sv\#TbWW]\=mtO6z,+d</Ezq=yZ%1/|=_twtcț! 
4Ah h0^E>trHsqn{K{gd2s"唂B=hib %,06d *qc*BF]R^V \J1ؔH>= xf1ZA.{g#Oir௚8[ךs*{&!4 Jd"Dvqn3hHUa sGu~2m9ʃmH2A%O19 $$5mn swmm6~Q$U8>p㚫̘"=AA/W pgn)rm˵EvA-*SPK4%5tD4yi[kiZ]xx]oϧWEՉP\_)'EMn@\TU]**7],*5('3ts2@CV8v]b^]did5FVQ(" lG @Op) ěU`̙Rm#cklGtְ5 u%Eilˤ(Ubήfnq?e/f8?##c, T A2@7 !H0t8D,$)MT`gqIQN_#:HَQ_>&`<Dl?vED2"{Dܨ(% \*!#,<8 JVKBM J$, V 6QzHL8cuD2CFbu,m c1:[]q7q<7 a&\G"GDHA"+1"qqx,xw슇e< lKȲfGnvUake|Kp~$sLuZ*"HsF IeƲYrF)c1PGt푢}ŝ{SsMQnR7eOw0;?/ [VlLA/mN ^Ԏ霡Gr|nXʄ2a^!X&HSE57V"FMq鄷Hqv>re E-M moȄsW|d2 )ㅍ9 ,l.J)f@60S?ꨦ۫vD5Dz[5vH2HHd҈DPC2FE._t)rU=AwKMjS&aJ;fJu1)µ͂Vȷo6M&j+,ut(A40ԭT=^MSw=5n O,R ,lrZFN c-$"NM.Km_yN5T8m;-::|,ڶGץX)FDJNcRJ) J3K`@%Vy6R$1L I}\~qKIf0%,쮫zR\"ȟ/yUQgeʡ0YR}v)F"r2A'RP;1Yë@$ uxV gE5!EsTa5 k$_ ܇EQhX_/cL6 _ pML$Y}Q}<;#*ǚ128>qނ.W eDrh%kU&g7ƴx;+_O5X~-TF YL;x4&zvb008^ k5uJ] ,9C%#cԥ/g_1KS|qycШ.ViTO+ǙL>;~/o?~O߾o?`>oz w0Ru$``~}=;]׫7蚵5WuMkҵlsԋ o/EmyMw v{k!@\~}9?f.]U$StrkUBMH`2[%_/Fx( 9 ; f|^e^z8SU"x $T]ύ$@ :Dw^MݻзX7TfR얦pˡuMFfc)@$EhsөOjvd_֨.Ɵ˷779mitΖ@n rmK~QMOc!q0|24K1iD !C"DΜeNjASYPLb:d<L{fh ڇ Q"! #K9 Q Ƶ}9ۅxu-BZ7 I:j:%ǧk+uyi:Wf'm2ij3]cRţ~Mk XRՙ։\r:Q[dMkRQߴ~7k0W@&3pȥ+p*Q)QWO8WXL \%rUg؀ZɱUpJ`ԵR`맙M̂E➳U- ertl:zMopQTp#_>n5i!B30Ū+0ea:QYs,0t`Z Np#$ \wgeG!Q)qWOPRugF5ͱ \%j:v*YIUλt"֬3pr]cDe-XW]+p ]ݏ#W}Ppu?*ɑ=Jpc01\%5 \TjvD;\%*kbzz:pET;W)) "D.&]D-WJ%{zpE&z?]\GE9! '@]ӓw"]˘nKDD ayΠF,he'0YVF\_qA1=qd&f1/" kEB(kB.0f tP[g_={rP6/jrٶ )*G0%[ :10 f1grH8+ʷTEǸ'q^[&p;=ByXwՎFa%_.awUr\bA(i>0x~k:9Np7Y6IUkIŬ3YJy}+x7N02 R~0Wh*d${]" 9 _FUU-'-dTYvlr^kai쫷? s3l1)J26ri/ p0 |wC(ARYrG͋ϲ]ͳt >[(~f:;];p8mv4,R3EyJ fw1c_&`M:q=n*ZsǧIwSZ13'n3;Pn5*kq- "ؽV֬0ְ8|&fUZJX&ڕyϲm;~h}?dUMmiDlŖb&ڰd lxX3IKikl71LU]M:)LoU,̆j^'hN2>ݭӤ0lC1G}m>ŀn>OgEWCM)gKBPQffktcCy 36 ؗl< g 0Ӿ3RJ3SP2P Z<.Xˏ3)0r]\٧dUg*":B/YYYZKկ^Vv\-r!\yEnq,S٢2.˙xs ZyK%T >AU?x%R\>]*zgړ#,c٠oE׃ 1PZ캻zw? grp1k%0v9ns~W')8P_L!s E XYߍh?)5*pDm|mY4 Jdx1{^x5 ]ezI"U[X z3epmWn/U}Zh;$lՈ7Y?yS<{)zULH^FHOżArtDJ&hfQhj:{(MOmv EpǕPDUq-HKyTo kD"9G,j͙Brc.OR@ %' l>19yZsR@ԉye41fg\'dƑy[.[[^ _L_z>ʄѽU"kUٔ^ESOM/EQ\QǵuzP-/rܭ:Iٽ u!~#?}KsA4ɨ]ï4oM@r( U[S2:#9VXDswVpdW)tWb"vR-)&D6l^oΫo1$e|v#~?[ΉLleg 98X9pε̪8cb<*QQGϭw6@3J\Ѹ_.ֆKϒů:hV{lw 5]aSYlK8>,)xK9+L2n7.\1>4u|?TlI1 'F ' Q8Y8+,[ИI_bjAڃ#ʊ{*Z7( 2iĄL&RJH 7* qR bbA$eU<*% !:KGpGm`*Q:D1!m?FvX.toq$ݫQzJnW-9.C,ܤNy>&dzC0"2hÕ % Li)r$֞xb<(F­J0GuWku3`e@3&'I֌I4\QkY[#gY߁s̩\g߬|իgNuSZvF9LQ4UZՆwu_)&SW? Wn;!v,7',K99YcRƋ8`Xr$ Z+p%M攝 n 1` oCLb7H dnh{!qІ Dh,@{Ђo[1 B2*ZFWkl^wwb>͡j`Hv0nzȣs[(\s/᢭ =:bѮKK[WæiDF4:7Eg2);w/6Y0P(AvPQuP{N 8p3lS}{Ƞ.EMAhLqwr^B*wV:C?k@\@}Y^1opE94,,\iA|cdsf8 H,V*y5l`xPq㺾Fè:{V=o[BV%?!i3D8y U0MS80-,O%҂ 9- R!O l060q< {ҾkLZ (:Zlz'2㙊4 (/[uiCrt@b(18$%ՉKY)tk6#rMYc*|;{h:_D5 'x] $0˜M, ^ rі*)RMQXV*Qd%l߭wyUkMqf<֩ZLz{wquZ[ٖŰgȴ.HFFHPī܎E51$!%sJ)=ɉ]11qP@0H(,:&%4DEwJ*5ckl׌atak.']M†ܷ$#5UXW{Ֆe@MWsJJU02e-AlR>Bͨ1ahJh2+ݣ$p"#{YV\iʪFж# J ڴ[#gA\01Ek{jmFNZ`p| LPk<8lLd 54"{J$Ayo$h!#2A%fB: p>&ǀF5hلٮ+~v:+cшǾI#4F `TA(dS!2:M3DR`Q ,iCŊ 6)MyԂg|P(%R8&4wqiY#F:.ڣ^,p5.W/";œ^h#I(I(&:"AƑII\ D fNzzXakܱ>-C>< kR-pY[61[VaApǧ~\? "Xt0r%}LVۧt0*e:tnOzIR௎6Sa991Daڋ}fnY:wb܁Bƿqy3Kj ȱ 6'6H~+cks B98 LD˔1Y  yMwC 6x&9gvppe=y\6y\<.Bi~޳gA`cQ_̸|*HQ 2ހ9aIAѽ`ss/Zۚm͙ڏ֘{7 I: DR괈8t4:#ba9SqitVyp*Vn46ǤV# '763s *V-mo4FΎ6CM hW2a[7SChqk%ѝ>޾eSdy3u~쮙勪&4UϨAbwjTH` Yf"rY'&Jlǥ_uIO2Ygiٚđ60%OAhftNfp"+dFSQ" 2KjEGACb(d܏0Q] Vº9 `$A*f]28RppC%iHm\ub'avNXKNDmL :h烉E$ %I1(1Z3pM9 RU@09ΨKǨCB*wJHOVr1)w?!sa:C߀w,x:V3!:^?.d][ƝM F4x>BY+4l}WIsI("=򄠫D`2}ʼ-ΨAc ;erήG8DuZ)a<:cc^PvtƚwwOM5kwt:UН JuT_FNd@!. 
}$Jx:mpt^v0@u !g3cKiP("=J" ItF)'s75EUg).[»W^9I+Trm:ɔgH )ctbKWϥq8J2[ºz~hVbKΒ!^WʧGwyf~կotn]x~,K9|KcYA ʆ5gݟ!2y`JqcoߤN~f`AiPDqw(Q+%D((]Zޥ; nV|*,I0H8 &F7{̩zߋwͨwkTZTKAH\b Th M-O]]Odɑ+hֻ&##k}X`JkGkn%=YP\(Upg4 ü'fDV7;˯&j#"7Y{b\Wߗ-]If$ńAlx۪JfU%¬iBb#Pgxj)$zO.+Պ3&h^E$BRk!DF/A5w_y(iF>+^׿ֽ 2GTO({%|BdO{{7Ic{-bmz@XԸm,r=v'ΆO/wn|>Ndz+xkp*p:w\wNU\EұPg ¾bLT_ 2`T*U<;mz;b+8>J >]q#sI#g'KIIuQ9 K/J"SM8eN0Lz9i^Iƺ PLT7pck[a7m%%S6(d' F|gI_ү`pmÎB-Z.+BnAHF٭Ym9eb?`ŔK>U<症eA虐!OبUȽ73U-.H/>M**d1Tgz(q[@p1rWȂ7A6HB#ɥRIr>%͚A Qe>&귰XoRݔA:zHG#r 9_Ļ//`Y8IXTR8s 6eJ$II@0ĉN<@*v6a)cF[G6nQ+f }>ebKJ]=د_Z6{o1__4夘Oyylkμ,|yzvheh߾ҧ'||ݪ/{Vvu yNפ[ c?^e_C3?p-S>+z/H{&xu>ňKqy'70TgPIY0z2K2ݪuJ=Z68r5=g=l FyӭGM0BNo=G6y_/ZctX;D+"Cvv{:'[R`l6C1pߦ`O{?C!7xUJ HP$FI)1ؙ6JhU62MMqrjk㹹1}駙5 /ۂ~mbyL% ~z\W^_%١P]`w3%EKf;Cu)-O>`g(MN5+~]v~1ǀz>s8B]iJ?*I"Tn] Ȟ2z쑽D)i.H#.U!Lp^{z[⤋v}gd.8ʎzBmzq֟{->XuNp&Rc fqG".ԺAJi-mԺk}HMVKK];-!6nWKö\}2hQ;W]`; wi5o;\u)tvWZ-] 瞔}sbʳ^O{_ ~bb ᡆR /҂D_133{ o?}kK6;]\ѻ]Z+&0\9&Ŵ3pŕ v.W9CpH \uqU֨m. > \vv;쪋; vivW 7Zz>)Q[#mq)~`Rv]9\-)KOUUػ+ \ui_ >FMpᪿy4~ય.жUr&z!k>e|fWQ_d2v-,sHIr [Pz: ]r;-8OKYvu J]gy<]\9gZo/~KXc]gdx Yuqۻ[Ja [0gfvΓf.{o1Z -:X9 rPT`k) 2`۩8i͟Y獧}zs%񶇘'cdO\ӓ~SQo>Gfm|d=hD6RĖ5H6SdGFs ?i|&RnI*鼖S$?-J؜nR-AoAnq*hXJPTLJk\)&!_Ya )ffoFS*:֍qa!iNS֎dьARwOUнSZ bǔkj|j-X%N%c1TF B8KJc9~|ҥ֌%iELd8I$JR-$T t!50f,clS֭2dɪrQYW{h4A%~/}20d* !i)#lZD3IhQSJ KQMzrBê Қw-81[_+_[W,@!M<8zs;3l#mْX-O@ɐ>% VksY\ֆqsPVɩX]bf9UJrVWZ\ V3YM9il DНHD_G8:ӏkDe -v))RPkUX@CB}b6ՖMpm*$UdPXS5_̜3{QEOs(Z5er1ŲƮL #1KBePX ufIQp!ޱP7W*KI ie§`4.ZAc +.(՟tPH̯$15P1m$D/=nw nXs6#jE||:oma"czR35}r.RګYLKUT;fM1TD@`D}m; v5,z뚭h*. ϶dX=,n޹VrcW~KM <<~Il Xd-X-uZcg>kN>mtCvmhϭVɏ{\d] Fl3j%`|@ppi7T ;<*M4t%:l4>0p8"GvuU,R{Bz01#II&JyMA+C,4pOv2/xgB3H0FFnm ĝ3x6Ä>&Y]k$pjBԏ^ߨ yfJ| ړpvB6YK!eS!TSQw5 Nnw,]'ˤ q,:ιp͈Ck3Ѡ˨=K{6 k$!:*JȻK_!qFP#"V^,1MVY#Ydf%sg(Zi֎ު1!7ǸX5ݝVe@Mj6ZͽUS<1Twmw (Guӛ .f|g$;T-@<k"Fʖ)TW9 a](UY:Mxk1i3\d*\bqs2C3fh`˱0ky53!#w )wWKFk$`M(HY~Fij+50&R# nzB/狇YM u+#zҁL.A|2.Aroofo5Ӡ v`dBvzv{yW吝u׿۲}/./oٗ5 +k^ 6KYEyi=Zv  W~ W?peyīyÕɈ?t+G+p%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J Wbճ W[d,7n W^+7\A%Y1\pJN WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\E WW"dBn;: r)tcZ7\AebZ\p%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbՋ,p }X׽Xml+Vh2!)Õp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J Wbګwtz 滣Ͼ9|ôa- wzyco8*\1(Г? fRT1aPIr *a)zL XW,׹^pjCvVVjr3.w6~thyS]pD~^c4见9\ij<\T󪩏TMp//ΦGm{i)!h.}3OW5FGK{N{:+b2l&ꐂhU.h)}Hs_nWf4nL(t'3)>')m`dyBHЉ䇔<6ϵ1 g(|/)6FmZOYb0Fz&~v/;у<nG=9bk~i3tsn D٣Q蛿}V<3wܠ͠舧2HRLH>o?I;4ێFBY7`&: W@y{OTrW6q*S\W1#\`ˍ)$UJtJ\%gliɚnpr_upj}bXe~dcvOu(\- .\Ϳs#L󖩡Lz4Ԓm~̎U ^!CTQUϘ&E՚pJ" q#+EW,7^p4]-j=J({:;Xfz[]}:2,zs_ψ.ln^&Z4̂]?fBiVғbϧ4Tn|BP:UOXpnpFuTZdmM+z\9pQ \P;mU&#Z!Йn:vSH:1ĪSHO޵XOWȢciW~2WԺmLekiHpԢ>;[X.^pj_$U6#*z\ۄ|Gb1+VBxj 8ej2D!z\Jr²r h\#wC~? ,~kzZT{ޢR{[gQq8?eAᵵP7)6%KjSP鴓{)vNml kC ]e߱/=\lzfM]l»|U~oE ^e\7l^>v$#c 1h5fSV.~_7Ey_'tϯ>{Jy/7UN\o~?z;_]M\_˛{8/oEhP\/wOS_߶Bۺ#MM/z3pqMqyҥh\`] #-裙u1;Eh/}{<_NC~zsQ ?ѱ(!gb4oZ xpdǘ A@ 5$=פ*}(fn,+ V_|WNP!n_a ;pg/9noWʕ_o4tqvvOv{/|~v \o2IܽlV[t 3y~6 (]cvӔID)Oh".k dޫyBkqcs<?ezs 1dRޏqz.Zc8G79KȦNXT"kra8j|r(IQڤPTx ι7<]'{W۸pwK_ ,w|X/{ j #K߷d#JLɲ̌Gl5ͪ穮E{c9̑cs/0CN_\]Q/\'_|lwG;Y,%t:4"6jP]Q+S )s"_!pB-ox͡( ~v7Iۨ3E5xOkyu[mF[ڊ^14bm25zy>zPms!ӠzU,> QhxZV){Yuq=U;],f%*e/zm!_9|73%eh1.7{Ùk7k]b7?k\/(q1>aŠ<؄㳬BDt'Wd <Ԥ\l|y GՒXH-]BkE(2.;!·;r/[ $ ((0#'/-xԈ(x,u1= "bb*(51^ZS(HB45@Ee@#Yύ$!JʘT&H@xF%5?L_1rty9Qi˟],\F؉j]aࣇo1Qr#+YY-*~K.x4$/I,@Phia"^𲇗 /WiJpH:$ qD b=$yxD^mR$vqn (w[ %QxMnpR9u3qZ#b8𞟌 7jc^fLVAizk{ݾޢu:r,Ņ i@,ܾlQ8BOZ(f6]^? 
>eH* l]`sbz8@6xf!,eQkkn W=?5>yڢ獖a>ouu3$[{ʶ\~@e&g7SY@\WM.Tmυ 3;dznFܜ%_O6y,(fc;`t?逽 i:`\* IО9:Ȟsr읙KcG؉QtRDE*`ZdcQ@'%$nY0z(=0"ALjM<(ÁDVHu mm"cޙ8-Y_/k==jihi4{ڑ}?y6Yix6n~ ޶HJ6g#ALGkT1JТ+#qfIʋU!B9 y-hf(Y̙8 Y@z3qu?}cֱ)x:o"g(ϑ]tif~fY xK]߈=sZy7r1\f1z[,nx耢3 )¶rE*@%0cOQOl.|I5bZ`6L=qC;7\@ÂNR=WyޅA;WI,3jH]^ t`C J4$O9nk|IGn[VpyWn7/_h ys\iZ`@v}lK,z?0Oِ+7«ȍ«b:fl+ea8.Ha<]F*aY09BW g+H?āz 9 ֽɺg$AR>q7JIH5"c)ExAGf/ds/Z#}D͜\ړoSx߾e- _#y>Vg$C7>RPA}?dqV?n-K)L_VC'Ȋ{MקƵ8'o!oͻ*#?զt֍ZרX4WYu2pzGjz]0G3 +54rgsb x`O96qK[(W#sןn9ơT\m .t!Vuo W<OäS] M>կr-.XU3ߚ3d~*Â1bz'K OJorö{fFpɠ9!Gz-u)NB9|y.Rg-ךROmZ 1AHσOZ#U!2Q+Q3=Xۘ}04O=zH%;&εzGW;NHIJd> A7sH {t\'_qŀކqsŏ)]e6P*$EcTDGx)t_eI$cFQ i>3GP8,nm #T\z/ܬTU?򉖯bcqQdG0^fQCA4Z=:rfq[HZގ<{ЄgmnAc[9mß^;BnCU_nHF/eb8,pSJiQmyݹ nl_mJ>d6 ܛ 6='gbҀrP)Uj.BW9_g6S$,1inNg/$l-N)myrZwZ(>WX (SS͔F=+h'~ҕuT:vaZug&3޺^KC۝Vhmonnj?ݬPǟw /W]rwu)FO~g[=zs{4m#]Eew[=r"ʽ|M5$?}|X6 h cQPVP1 kԲ+I$ZK㟛kP/YtQ]++"@*պ pQIET~ksW.V?DI i%OL RzFAR@厝ړycqZ?/M~AYpۑ؆8nS݃M.^?Ĥ$l7vzkR]V l :y>rj-e ^#$ LŦdcK2OR i"0djecT֦B2HT=|#ү4:V< "RwIb+3gi6ЬӤ#͋Ȯ<4Y20Tf%S L|vG`t N`L0;9ӊsMoO3IFYV.]`,yU^I5`q%7RIŊd6VޭJ/.8pIP5\E)Q~|~rLCBp psz\~CY42|+Ω I\r~7$Ol:oof_^wC\\Ie]ٮMM~5>wU؆Ɣ|8MdLLfX~a<֪wi.FG2AlL.3)MyژF2-iFJ’Oס|͔e&rpcGTMU1+U"61C Xԛi/I<;\at,&_|kA>$S(5 N`A~O8,7lZ?8xsH1R +&ڔI,34 a~]unřk&3Gm8s۹c{N77n,enUu [w4vq&Tģ0]EdWgi6-S6!很堰S бyHskHO:hv>4NАVz]z`Lm _W~<:DmEOi!/c<]C?݌cx4A~!9 >4rCAϓ_'&<_r{vn?㺡My{Mo,jwxx|rXޕ5gm^oO Xh? p=yZ Pۯ ]n\33?<ʳ9o y!cs:fȰ.Oݾԩ[SȻEw便SeF<唂ÆڄmF`rpV;K!g[a%73A&DKLh"1S/9T'՚y a Bc_"BD=ʚ'i2Ȕ)O.:dh6QPUdѨh-uhԬSZ8IfG-0I%$HPh4>w59[}q=qq:\4.uf%r["\ŗR)&fHI3f]ޒ Xd$uG$HWǃ+5yW$XWE.ZpEj5S]UQ1J \p3 fM 8(_LfOG? .'i8ͽ!%0` (+o@I(틞z0utuEޗ0ߩ>fx_= -6eFpŽC#9igWu,;Fv;}FW$d"0;aFY.AyJf28rg~ccִz}nYW+3% m3_P8?eYrpxp{(O#{3NdsMY{2Co*ռ,̹JͿ:t6=C-Gn|uύǗ?;?NwW7.O2weV*լ|hz R5i+F )c_pFIYU̽&*{-ΩLWs3p).k5>kZiPzs;P" ɝ {bi8>G<̟ﳧ|2B{et˿qevnQi۫p5M!.CNM&8Q@Bqx (q8.-ctIyo:gl! =>>S{FSOSWixj1Z#M1ZrV>g(fJd?{;pȺM(KAnx#d,4T2R ??!Kh*gs#Q1{WU[>1pl% Iu|շg RyUUuP ih%5VwAr#钍5u}gtsaM nN'Q;Yn)Ǟ\1c46+fLȨ*B`- VRjh) AF5KP!RXuݷn\ D@KA\s}Zm$Bb ɨ|W!'c#ZZal%QEvt:ZaB BaGhݥ=з6*˂Bф i!ёO,ޢ Ә TTPtC[B Z 4BsX?mm?83 AL7ZuC Ő ǖQX[Ck9 q'YgeV\Zj莬Nc[ 3ƳR#u˶ p >܃` hJ֬G7lmxg0P6Pc.roPPCo8*ȦC` Ü` U\ *U+wcCȤh*v6l}_NsYbpPTZDJg=l4Lb oDx,8h&j! W۞dWNc@7CAw^KCp2Pư)0`uck!$ ( **ڕ"M4C:b8;k\#YA pf5ڀJ5񥷦:x$`A溜7퐄M٤$຅> f% C渵˾0>|nN,|ϦeqggA!nn8c`3 =k(N ;Z,fG:5WZsLڌaγF9@a[7n('g#=<.2#lRӑaQC^$u-*tyHXչvE.WswXSA CJ@25ti3 A)> ;fc_XA|⟻>?Hh"^S:Ԇ&n ?h;:YOPQ Ӫ #W(a[T1@Z*|Tiwus[`pO*6<&գ-М6nmVܢR+k֮UVm(|ͤ@L@rZx%}xd󞃊 S/Zi.C6(#|i֚&6X wbd=pe6|]HhJ7a#.`l'aPJ↙$5oQ8@.JlnT *vKcbX`XKEw,*f-K#80ubٹՌHPa]gQEQ>J+e? ՠL<{qz UW8t҉L>& X]w0?{&kķ+f7ډAB*Cd~~ v y{u]_zu9}yڟU߽զ_7OE7/8;캷qϫ7o?=eEW?_+}uGCꝬn|vzju]9OooLcޕ:~z~8Yz;n/6'Ϟi*' W;m6o_wyғ[v;dwYQvsוw}zNqc=߮v[ENS~MĨpb {@x%XR tt z11~Fk>Čɍ=eʇxƋ$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJWՃWA{<3`'\p|2p]NW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \I2/)p.'p~9+ +܋Ix $p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW_3p>-(pŀSZL pb{Q F$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJW$p%+ \IJẂ |Q.6Uz@|<ZL> h:|:R{ _M}A9 `h1tpBR+G|9`u ΁)ozAhV)5)aVSP{*rXz;ߔypݘGxqi&C+N=Qvo ?.?m3LI'Ν{"ߎWq_M{$-7Ŝ.|ͣ?](i M?bDWv1tʴJ2r1U{ ਗc]VGFf<2i;*ݒ C,7,gg6ЦxWR0D ]ϜzT̾`VW76BkUL^(I%s2BW_:*K+Ka)th3NWr ]=2QDW xA&Gi:vbǖ{MF} +kRѺpt(HWzj{z`vӾVow5xy{׼ñj}? |{v_7ӺyvWW15"gC+dq ^%Sq0?`;Yo憌7;]FԂӵin&M)8j&X d|w1 q>%—$eu(6! 
5YkD9qG(<6_M1ΛY=l폦+DKӋʿxP_)/(ᔻ: MT|Sn42?ߨC3d:`/XwF'~5斪JSq%qmɔ(,6?SXlkR,6-6tJ,#.> bŜdQ&[cJIcJ/{AC {w-i=4åfQ;MSZhtP%ju"~D5@׫ٻFn$UKn[8\AYҨe{'g3.XywՎ'hx(XJ)pfsz$̼:j>{Jp-j2]tIQ>|ǷXux]H_I8SKW$~!xH"7q:59"sC/!X^ƓX{>ީi?@I^6Rߝ+<h&SI %z\'x"Rַ,yIzӮbYqʶԅ{cju'%.yQӿ \k2Eٝ/Oe:5oh9|^rPT墅ŀjCZΫYY3×GZ ̥$/_eb?Ür]3v5x=#:lvu<5ۢPgUo]/ ̇ȍv2?!\\ ZGK!zƌƁZ 1b"iJj$~ޟ3.b>M[CxffSY|;[6-j@ͧ'Ebm/ ;S(vn=0ȅoN}97 ?G'WR M9j.^z+ѿ5iիAS~MSī `y%L0nl_M:Kb_CZChs 4O{R-*.:oF*MD#_hh2C#t婢 v?ò.:9GurAϪ waڹzXra@3G7&WHe <F #=r.^+y) xr] y .`k U cJ#68R$#Y@ c.*5Xb'TDTgMU N] ϙ]0zp< 3(SՖ:ڼ ա $J: ab? jR=JAi+ܯÔhZwqɖ.W,5ޛȆ53%vTmCҷ~ڢH>>w~Mu63k{M0F<&c͔2G#gmŔQ1*HD.b1'K*Ljd<)V ߗh[|[_O@GQ}&iv3ɶBvmP f _AsAOA@|n$  "JːV!jn&3A˩TEXꯜ$Z3>,I ;8&]C+f{hs*klYzn _EPNPpjg%=Ɗi*-R)0α$oގIv#BE/Vۭ%t|{oc4O͕q=X#*XxxW{FL|cނC H$X8fiYO97cS<qi "B#ǣLHx Na,QVanZb`*1ǢPzJZůr oY)I`(E0f,!9SP5X{5 f X ޮQA[XPA꯵&PfT܁ F刊`) `!ӲFv ^^OFil8 xbl²`W,Y2 ;r^m@em@G[! +}+*W2/m\TOၱicg6(΃`3+  #/eCԄ#8`FH qNAE ,VMŽ`rv8:=YӑT ;sxfمV[Q{}W@[Ǔ1w c2-Б! 1 >f$.45qm tә(ڌ$X #2z 2jfp!.[}2O~h́uY\{_:ކ`߹ag Ҋy +ENgkB ·~ܦ/vؐ1&f>!nS~F(i0fW_U[h8}uu=r7"j3~9Tmad1.cX2DҭjcOqXREav kzBQS]5v)_JNV@T],*5V:Ź !+p;`{s`U(wmIgw-9upIY,pЏj1E*~3$E"EQCljuUM=LN)tؔpI$N `0u‡ό Ѣp"tt3ĜK*1'Te|Y6c'^*r l ]]%ɶc4'P:#fmU[cu4%Hb: b!$HZ }$Ik*ѥ !gЙ RQ.홴UJR='>:Ik'b*Qh?6+~6؊R`\sOCݱp7u7]zX\[z_bϘq$?' *0$O0savdTgI '|kJ0!jJ,)80_F "/@^j^gJ c~7ߝoG>a=T'c@2%O?OOO..I)`JÚ(Zr6}2b~kcΊ?>UjΑ]=MGŽFk7Տ͕7W/^-*gCR\yp{ !V~w}U{_|s8|![gt%_b2|2-]fYޒ &j,XNpqѓ57'@.]իnr٪kodR8"h¯+8ިc4cnŤxdasĎ۷g_}uۗg7g/_=n^o^ҩځ+2@=DgO ೥G,-Z֛--\gi]O7|uk󖗬brzmlHwrObI<8 z-&Yt^&1h+'8wi}PS/R !te [GEctWxķZIn']RE0I֒}@[2K 1I X(۞0uӃH< '4A`"Mf,M:ܦJtuZy3}h'6 1ma(hӷ;{04x瓛.AXe5ָێeMexHf씱?R.]u H]$/mL'ud2Sd qJI9?G>|? ϼNE` YMB϶ ;t>cm'ia4wLӣnYl"LHCNE,/4Jx /j=i7X6u 'e_~ѓ[nGq}li/G7j!<&uT LLN;^5Ŧe_ٗ2#/K 99u{5 n\IdDPե/ˬ,qOc.V4U3z<HwFV4rVhiWmg3~xi5f^ƣъfWW/ux~Bws:O[zWgC}s.c3ESűR`%JcKԾ+o*)ʞ}af?"Cxr=Ƨ3|$jy!=jgΛQO䢦L3AUz#~-g}su?{}5u%(7HGлcKOmjfӵ>t03uwk&?^.4[`fW{MM(x8fQHilbPTqKsFxt/QS/Gj]> 4j!VFd2V|o-P1mYer,Flmg/=$>RuF2x(opV3稓!,򖅸5r5jua.ұSB}T=T, oY+AHn#R;ֳ[:Wێ\m4vjfMdمg6RU @%=Su ճUϲ]%)煖fr5(F(g2;hA-[dv9eYW=k]I#Lj=c9cVV, N:$"L - fItZdQ2;a: 6%*m;5r+ ZK)2})zȤI# E(ڿ|oUReBqޕ|ہwwɋxֶn$Fx&W-s0^ۊL%>".Gxwq[$f]t+3cnG9e$k?#6 '')٠d=CH'jաZ꠴$ ]H{7F;%7F6M)9f`lpX'H^Ma ԣT4{) jd. 'G<& &eRYΒyr8FJ!tu]ƙjduEƤ*qީCݛgH n'}|t*nTL N;{@iDQ`, %͢PYv!Kr,犩BW\+Z lPmW_ \iAU!vUuPJs8w*TJ՗W3},ŗBwn)+WW omK7cTBߟW_ƣfh$kQC=pV\+!jpGKL:.j{(0]zaP\_ L[Yh\~0Q J;\*M )8 *?-*:}(pUy*wWrG/0^zEalp8jG3|\6~F =&ェAK?5G%m3hRgePixsGO/>CI*<0~;]÷ө= P-9"8]Ot,͔gHWWKnމ[ KO s3s0r, Lk0UR_m +$׺ry%\"]B힇}tOO p|}lgYD'p EV}!PhѰ2خ6*,և>Eo;pr`_)rwy:"N^ dl2A+96%}"@"qӘF1MH2=-. =y?xa #4\ >J>( g%ꖉmHNLrv6<We gWO.N&wOO>;@ ΟS3_EqueJT8"jN4jvK\g5hj4/v6Fb1tukC]4 r88,Cuw]e5$6 #LB n{;=m[Q-y ݩ Fs ~n[HѺ{c${[kv㶨^:׺^197gbҀrЌ|/.UOmQ}kLԖ9zHH\!`B9KySEhLRsOq^y`YaIhL{(jL|[-\p33tQ׼MC5wmgE=a7mf]VȦ3to`֭dg"d}=5P' ƳˀʲfJD#c.Y!8"{ӽc!҉<!F__PGd;HE`x㭐 a`X@,x㨏H*44VQz}:+8PR铢`lS!Jmp-u@$ћ( +(M^d TkC#u YEL1.5.y+`$S3rv +]\Ρ|V˗Cf}!$r [ruZuv皃>^Fr9^fjgre,%N y5(aiW8XAt>g/ 3;DH@'YFIb0II&@$ʮ&g>4߬oz@uJN99C@ B<7<\9ES7K) f* ~x]fl(C_*Z8A,|go"E"RHxb ')y`NEo1QrUzp%Vڧ gw6|DqNt]X6JP+54rg2x`Or|hj"!W-9xFT\&yfx_רCB1NտEt?%#Ӿ)   6wxa~热llߓ*`8I-dnbUK/kVb6Vw{,Se4A0gc\/>Dc! 
r:@z&(As"+P%Z3+r={Hؤ/u0"u r 1PKwQO$шG*V hPiK'R N:M-p}p})aأAG IV^o/ӿt~-48?S4"G $*PSBрHG9:n:2i1(jNRQ'Ns:21O'^(:5>noxjLS+ccy9z(> #32"XsYm8 pmdPm/oގIvM3hh°|ӕvgŮX9mo<});@U qh$ }b2.e*;pR1 +B  R*΁5#/ kPGQ/BLD^SNS$FYS5R;BDLCh͇J㹽%f[=s'ZePثy{R݃>7;:{: { Ev\Z',H @dR r Z{; .JFJ0Cu Xj#P\.it*Ř@uNH؎%3rKF x:R+_vy,+z,Kj J"F9/&8kKI"FjA'#!'cILS}G//N-eęh_;AN/lTc2h=ɡKO3y)7xMka3R{|:su6'Y[Jވ^?T]]Q/ -U)Hä!57:Eb xC|Ypv>r:gL{`*0r pTu{NM9gA`ע6m.r5RatFjl0Fc>ޓ{`s-fWLS-~}"AU@["U*#b0In*PF"sHq8&6Fj$pPp6չ/QȄQ9D4Ct|P9; mz4't L I3ێjVo{akY Mg]( ؈؝)$"rYv )gq [db|\Pwm۞ݶHS22OF3:i 6 ъR- B\d P KqNJro8z_1 yna݅3$ 4d] QHfl6$%fwȹ!JA}2hʞ˛AWy8JIP3$$*&F1̓H=r) u+HWԊ8Q -C "9&Klnʄؽ=Fq0q ? OCn4сBb`}\k+{l0cӈd]j|?, %߿ɾr{W*KB y?!n"8X+*>e>N(Krr$[nN*82Esm$Z@JCDg#1 剪zEn WS\K(g|JsbC ;Cu9 .mHU•ar7on5_p"5U9Tt9 ,6^^q[H~R8XSs=Mk}۫yŻepr ~hxv^-7k AVr5 8T: W0b'|1=ZGeQlmFi}+$evǹ?&S}UJ >'t0=w>sC%Ŀ ^#;~ݏ???>Q?O~zpfbzm$Ph~}<;Cϻ_04t547keh嬛a\Nr˸w5-W/J88qqX {3^zÖ]X1A b~Qu ˁoQQU$**CڢSu6ƽ6v;h=߸ē5 Łx !>IPBC#H5۞0WyE%= x\'2|%*4oDH•΂@ۨhiBDtpa\s2ή.ɾ35t+eg)NpOy{{sSCI y&4gk|ug`X'q<ND ι+YRkل@m*#-.p^}Ů Zخ|}/f#gBZܨy .oG&re!(P(-iaa+)9;T3}u?xp-9Zx. 6PB Lk 4a6J':uҩm$[31"1sHD Dy &g B:vuFΎ{ULjXcҿķx)UK`%hϢPVLb 9I"<:fCv۪H>7Q"i͟BӜBhEuRD[tEvAgӪC 8-\OX<^UTxy{ڟMo#\Tk8;*:y?IyRYJ/%SC r1B4g3@ ¢ 0I܎Ej7FgQjBH-,n:$N5Ҥ & 6X[J؄Jk95cwX3]g M _.\P6ȸC}=ƨ)}Sw7v4\kTz:iŘRĨ1ah giҊKQmg%y\lwq& a'j0A%Fٻ6$eo/I.6 vnad9~3|)I3r_OW}]USc"^683 Æ֮M:dmFemF:,]<jUܠ{ 2Sdb5!?Vky:Wf?oruwn=u_#hEEy05Ey.0Ӗ</4(OFyb+jp[{ uv aHZQR8G),mݒֺAuzKnk)RfJŅQ%(,Qmr^p۷r ԗzŦ]n$ʀTLNsx4`lDR DR%{xO87Z{DFcHAic ׄHt4qau:hW%]9TAm+ 1yHL60i|$`4eF="dxZ-&()S($V溄/4~iǚ`,I]>t`iDۨU@qET N%$,9&O*x]iz̿Y@65MmҴ|X,Uk!?N#s sa8=-$ogl\5HZR +Thhߦt) ejv*f<=Q(,O2A)FTiG(1ॕ1v S(.bPQFRl0@N%eL*$ <#:j4;9?"<^ F2tq!㝮ln}KE~'LlO&^yDu5>:LLMOoE9( M=-l^@$ЙyچGb߼OP5{jj^C D"0Ӭ'b$#1;/@﹓)Ց%I#"aԒ.Fp3t0_9#s~ ~^Zw ѸN.Qtr:ZMէysst>/ |RLbB$TIS<$*9(cƌMyoq[ףaoIY>Vf˰;GC&nUl'\Yu-d\Z+gJV/Q XX(Cvx/G7bßNвA:~{|vkYٸڃkvWJZ ?MO&T{3U$zMY{v]"k;MGbl[G6>emk.[UNt恺}nb[._,ݿ]Nw8vG-wd4g9#{e$\ŬwXr˦;[lSn6'BtѶg}WtyimR絊c%%/$ k i:`\* IО9:.48N4QQwXbYTTQT B H61eJP"K9@S8 ŵ@$SBAe80(3Y`jDzM Ξn'b<}ܠw)!L-ߴ%gJ_|9S<)8c>f eJL$.}G!# ECq&P82GOs,P>Qxeu-*?/?2gذ)=>ݪR7[\MLR8mjϔݞm:~/㻂4wgAX9)Cvj=rҊ07V7 {a<!$Lb6ң%{yÛT"hĥtތgZThSY$_-Z8A2| /.r*7?LFs c,PtpV3^~357"^s#U4I}{ݷ|޹" $=k_-5 x" :"pj9+ϧ@q99/lTc2PCg8V,R\ৈ<1Πْ9VVi.ZY^P&5`, ~M3BMۮI5I2\ڒ/} O—Qj$|B"BU -t*h:]eFwt JrN6ݍ0.yvd~!E^nD@g5Ccg ?_>7ͪgQPZP٣ ̙P'LhSʏZD!in MgOm?3JI:~44eZ0 ״&=NWHWZ T*)p%i2t(yGW6UlmVUFĞK/N0/NWU =b8 lXY2q]]zjT*65tE[hk:]eHW&x s[CW>y}]e/ Rꎮ^!]LIZDWX8.'mVЦUF)yGẀU{*խ]Jl:]e^#]IbɺBq2.pYkL*HW(Gˏ\ؒ$*zy+@(3\h t(ݫiMEt3뢫 UF#JNhGW UUDk*5+Do:]e:j;]=^$+xy:/m]pVP0at%+ձKO9|t筡 Y=2Z͛NWR7Y芡Y-X KbZCW.kuтh:]eMKճUTk*5+D+h㭫Ɏ^!]pBM*6q.-tBcWe2ttrd!DKH"6Lt^ 9+"o-$]VV ;4a*cqoAT ug* 89,bc;Wn*gG~XJ ]*H2RatFjhjj麧Y)8H?MF.*\mi[`8j>Lqu-{w+?B)F ٬ g;~|-#?,zTf8zIJx k_NO}AU@["UЊHOp&B lBoJ#5˪s~jM8@ȏ8!*Eo+D&/q8&6Fd58(q6 <. INQ&]P`H`6fpPug{MovS}Zqɢ\Y;$bvzGof*7:{f$}_w_^F+jr _ }p7vr:]nY[=9n:.9Fe*J69ZDa^Zt@)> "rVȸ#D?s_~0pbFh.&K7#6ddJ ֋LW);r`oTFD%YH6ܡX  eʺ̶l2 楕u hR$u)D!IjjCIJ8%JsC%#N_DGIsmM{)5>0Q^Yu-&A~fIIFQTL&c'z S:EWJ[=NC0m hjEW&!HƏ H".JO}ԼPAaB_?ڥFnx!p˪ Fne:*Ґwb1C{n*;͇aQ2z$"+ s 1>˱iWwC +eh݁U/q*$a<#ii QB9a'crr$;k7|2! 
^ā)זגD (YG'$:N(Oմ:iT "cE8:*Ww#bX3~{c[.fi_DggW5ȃTW+r}SU'˩Mos+p )UJޭ9-Z;e7⋷=`P ̵A򪺷ܬFEm<n B:G6 > [!G%Gw1FOcQ6궹ZI'q#y`hRSԻƧ,㱜:AS2KYX] O⻷]ś_P/ۋ?]Oxfp 0w?0454kghU7|q9s[ƽ>kI/ھ_q~q0|۟~z<~{Wvm5+g+0'zU/(Q}$%*J hCglz)peϔ϶6c} '6}>mk ?=$8}~Ox.h m hӨLСDT%6tg8,)(&]FٻVр?RP&O(]Zm[7y[?3쌜acԗ0aQ;C5"t׈F܉Fl 3:!FFgldj[,&vIFmJ pYH )D+Q"H\ Xiu;#qx{ź ):;C"X/^/zqǗLng6e@49.8"zyEۿ e`ZrcSчqǡPta*lYs\]=OWe??ufwzIF=G?R#7ñw~v>= vJq.JWcu5vwd#`uy$.yIk)lH%f6$@ i%GoE>3$mAKvj)eyi3[0z]b(RPKdHR{0=2.Z\E +0)|ȸ:M]fM9{X2m J&q\sT==v~A*Ry6 .+kU!g.H {B.lbK 0 -njXLfZp []>?y6]NhԆ{ƌ9rƬ2Y z IDDyE,`-Dg(%:Huƴ6d$H6YU4)duNbg.\t ^32:?GH? ̀eo5VPIseҒlMh;OGfNS;9hŎt*r-GfJ1scbC9X*ɐa2^rRh4u^UN&yL,+L6e :ˬT6fhD]'wE6(ijMCP70IU\ʖAW~;#LlƬ|>z,bcQG5{u * +iL,D|QĴ=<xY܆/{x)"D17O s3sJnw{')*|ICTM2Qy֌0)o&M" A"" D|{4З؞A3s?% $-=OtY7zzVOb*%\I =+ggWSg'*URfBBVi-eviy&7kt>ϱ? ? 2\$( ⩺&0 ).i4C>POkpg-aD6[_atmmtCpAECΗsrˣ܌{x!a=saigkv6gi^O1fROd MlVܒTY{N]=;I'$MEuY{2GK6ߘŶEPF7_FWrG&]{ l&\]|~+#wt21gއ*&x49S^ZC<-ǼMũRmh0x%="@ Tt#c0pw fCXNqIuQ٣ ]O)<٧4\y˻ZR9眶'8 {i ;1_@#Ȯ+U/>լJ] r4V]D:et ,T^L߃,HC s}qX',q2Y3\VXnU.7&:O,&GW4>[lZ j[iW5qK;mjZ7w0ۊwn ټ 絠, ѫG(av󰿪am@tÉɸL `:C1`ߣ* P8~/m s-hz2EyѼkPeU2G\u>.bl2JB*UKƃ³P>V˘C|X4 L7BB_*g1 WFktIJEpI&rBJft Y#{FlZOfS1~fKfw 8Mn8n] @0aS`~7-rއm}7??'-)2?73Cbό!4)1M2jz7 %K[VyΗm;|q_ *{CU N*~D.|+U RzB}?a?MYi/ƴ# }w&{Vlc)elz5.1DWRT9d-%^a$eoAނD&C9d!Fr)f (c:9$>4B`zv{VQt;:9pe]뫸s+-WPT.mԯvgFUk]9O JE ,'{td%^ɀANoZc'>+dYهB Тa_TXDP`Jy2l$O_0Q*ޡd pշWd7xxI:j4 18DB I6  }<[}oqb}l-kӢnka8c%QGveo/'{[\=ّyVRIp x^CRD0'jC6);K_#ƻc5z+W3esz4t^Wu?=;ӻt7;MBg40})N+"hУKy .gE>K*,rbhɇ߮HwlI^OJ᪸7 ubTMw1+](+J%ڏ/$LKD-ڿ٭oCU>2EˢX2߹nIVb wE2JhY$X6u}k'|z]=è[<,_|ƇE=y#/'c?Zs`"(CU6ޯ m7gD0S˜-(Zmun-o? mJYh 'O7^gNǻ<D[tnrЗ[ o=mtfNoH2ULWeĸroƚpLjB'KSPn.?hzuKZE{֒A[ŷ"^ftMܮXٲe [t;Nl 싈>f2ƙQЃ=ϸXf>e@Fp sډuIlzP68lXB?zX?!eWH 2 68Tx+"-[D UXHNӠ*@ 0iz|vGݻNmV7f[Omonnf?bCw ף]8xV {MILZo74%`64>Naȍu4 kIK.ƭһ2u!zo׎(q2ҋDOيr[КQwâJf7@;jݽ/ӣ Md|f1ʅ{ޟ:);6Q'lNsYGB0:PN}t/(adpLhz,8GJ(93kgoĥ4V=,ҍƻކv4, -oia++Q389Fj͵ GZ$KӛF2˳F89%!Cidc9UqF'eꁟ|RNCiY&S07jRNqVϘC IȆV񜸷8'Lcܾ*<řIrw?XgF_|o(q1R9& F9iўyf^OJpLJ$ZxE[KI Cml099HWbŃ[por^nYMܲYEE2X,3GY$Ws{$ZbsT8;˜{5GW$XWEfJ&RWG+*Q:CpEj[ǷWE%z?[=|D~`7{0Wnŕ n*7W=WXVH\ jUQqUTnTqu4T&\`à\\BWE#WR UE"jpU ] ZCUQ͈#h4&[Y\ન8t\Ǝ:B\L@E"`ץpUԂ:]h#WeaP>e *73&?m.\b)^ECXdxɿ1ot4Rn^Q<~7' vRJקidE&LՂ~T<6DMsvEgLr5fήr**a;F\c`E"jpUrWE]Q9(qe-3&\`ւ7+kb# ̖EoNqI0܁\q%njJ؊+3EϭB* VZU"T+R:J!F\!(* \P ZҌ:F\h d U[`[WEмG\W%j`>觟Pz/U&PF-f\8dzZ{QgӫknV̳Ơ6:aixoe>6t4o>G+nQs}E u!P C*D 4۫!] 
ن gy \2N 8fu&5?Nf/ooU:twJPn??`N`&q f6YTbi,JyKVKmf7j)\AJVKvݦXTC^PF뤶&N g|֋ Fݚsʪ$*\`E9tLC;r^0m "\`zpUjE TF\!,זAE*WEƺ*j?0F\\-ޞ0>q' \y#ĻhR vqڢBZ Z"WZpUԢ:JG\!ʚpEW$װjpU8n*h]#ZيpE̕pUWECUQiG\#@J5YW a謡ȕ\hRÈ#ʷ?W\ZW"V+RkqUTh]#lt g^U(XZc{LVUi >BZ`CtQ؈#Ĵ\U+~ S7{ ᯈF:B\"\`#e5*rZ6ApQ)GqeAִPW4$y-*jJ 8ۮ9;a)u`7Q wTkM#ζW)z W$\>0ZrÉwJ(#W$x;UkE-" qUTquyM"jpEr f0XԂ:JG\!AZ9CKNX%6]x -Kׇoҥ-Э88S"ScI[)s=rZ{(:5PԚ/`!QLE",rZ+̈#ĕ[>pN3 gc 4lہ%FԴpCQԃ"Yg.j:J;#Qj+\Q Z0CUQFqeHCENEW୫Wǀ'HͳZ*(\/'FN\3Y^˫b"re);J™??:*]S%t]RK=?~{&tڮ%^|/wu5W1~ ct{AsWl>ʲP+i?z%RT?/r.\ο<]ld#ƘmQ(ּ olcU(R <~|{"[Z0+uy?ΚSPB_QO{kJ&#ozZ]~rヤGw;Rf465$ 'PyMpR 49qedtȣ\y J2YUN}d.O^]e$3T?pUK3ORp"|jdPAM˛>?Dv<0fyVB2k yP\JTCanSTJ@a9@ₒ\]Jg+jUKYkV0!.R錌s!$s,)L,&.16IUKRuw1mQ^RcbB(NľAY9zC9xQ^ќ*3Xs}tbKyT1#הvE.ULTDLEEaI 2ƇDM 9ځVfQ fR.ƭj!X \C,c^N23ڸE@I)AqMj'JZP$FO]RnV 5K>.h`xfYZòRlw`4 \[')wUrNuq$8,gq֩;k!KGsmjm9R@h}eu)D4ٗ@5$x)1ZͅQ-*W\/4vC,3vVHxvYhgrDLVTrU,#Ӆ2לgA]ޑQmw<똨wg-&i/=}JdZY&֔@S,hO} #!QeYt3(RAUvNZ'$̿2C0?ت3PObe!R<W`UzXu` QCی100!ƻA Tv:d6Q t56lAhqU +0똆'z$;փum,J肸PV24ITiyM,C,bw g,vy{:%+&CFPr#VHC@8>U]تPFu5wNu2<&[WB(NCk?nov}3"d*Kq̕'X1Xk3tdD4Hcc.u%m[sP6DR& +G ep݈PA~,, mM!gT]lJvl 䁈:u)u`E[Lne$+c/ގZ[gP?h0%/@GmX tJ3-VUBi5eHTyPZG(o<*"4p[oUf1t5n(!/{#$u-ҐQUr#a?Et[r5wn+2TJ`]PLMYځ DGCPʣPPm7q1옍}u`+Ikj]ڨ ur7pݯ-;:.P ê Q GʅPeQERPGq1*cbY/xp6-й ᧀFSaМ6nmVܢhR+5VTAZ@רMk&=Lb2Վ *WZ@׶y~kPql?j o94p^ kJ^VAP ֚&-m(-\^j1Wfׅiq#0^[JF$ӠlCi#8 8%oC)F @_wE< \T*\o5fs˥ZpUi\CC,Z*cQ4k&7")SJ,k5c4&TsE}#W~`AApՋ7B??w{q|~yp7,[q(Ȥ 6*nbv ,ޢ8}>oF 郶* dPځAB*Ce/V~ ޛ=?fZ.Ag7k>=?j>ivݤ۳pp`@6z~|q|tM8T_'#ο3M__og8?ܺ\ChAFoM>=Am=-0o $&k絊t86?@b&d'N 4r@>:P|r'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q(Aa)IM王 w:0cn@@qq/@d $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@_'2SrAX?'8XwR'>:Z $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@c'PA (t@d@E:(m'g҇ m'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q3sm:U\0??㭦~s͆w_@kmq~VWg}{h(66CT0qKzC^+4!Ҋ2'CW T:t( ]!]?]1'CWdfQ&#tteFc&DWl ]1ܨBW@K:]1J+jLVO8ui*thPF-tt&&DWHMbv$tt-Ozc ]18v :]1Jxjma`avw݄+{L{L[xGbC3厝=Vv_ ;/f6)3nzQtL؄CJYO!}6;K  Ms*4 ɪ]`RZhzi:IM"Yfp ]1ZQ&%ttE+h2tF(l||ЛC3OfkvoCZD! ۱5;]v^;! 
z]1DS+5OuEt#D=+RaBtd>~Ɲ+- ]}:3%uŀc ]W~*thub]!]#s/_].wl/<%g<:kC8fx볃dn7i'|qyRGW4,о,4\^iAiS)6 J>{ˎ`5 6$tpCth_>ae~i  w(~߸Om^u2=_?́3SH"Lj{ f ڐh^A-A\ g,\hdž 8z3` 6j؉oc3tP\I|z|ܤ FFedQ!j-䇔8m2تA|)]o`R).M qKlt '8m[tpdV @\oGTQ ݻgR3cJZJ bi䛎7k M3ڰne 7H!''DW 8kTRubv ]]6%uM'tmܮw_}K++KQW@k*φG=TI%z6~+fh4f֮te::9OfBtA5q*thubv%tIʤm]p4ԧm]+&t?tER+vOnpS++Fi>ҕs*;ҡ2$_/o;y>\ cSEQն ∲NJ ԽjTc4cN5>8><`4!`)aS!YF,P!=$YSZq&CW 7M$ttEs 3cՆ||vnRqf0f)MOxfQhzi:hJEpI9=bAOV+t(\GJ$&DWX妳fpd]r@ul>r}o;pLOAnOd3&(;v9m@W$tС:{GƜ_QūfbagGo<:s7g][~C~4p _c*wA d<4`S56G1#s{x{.L۷g?㛙xs;G6v$r|1F~w=VϿfUxr >9[SS5PC &[cۨzK]1N9Z6cJBG7]0-/x~0Ufהp'8t2z*yƝˌ29+}or_Fa3.V:o|k3֦G.W?__Ƌxxә}|-s,/o@}_cNzho]ثfޖ|6WItq+~0_o޲u/\ߢl{Z}&;ﻏXNՋY}f5GO._ͮΖ&g|o > }Oc}>v]?PU!/8/y=ޠq뒳Q모TJg=Bc2W-)5bRoZ7IJjVpV/2~7^?{q"9qAċE?WR$!W=CR;\Q18﫩gK4@yNN^M`W~l%% љ_zq؛RrnGr,S.zo$7^l8$Dѷ/ÊjЯ(n[ՋfIVfMisIIz|dOltar.h0ʰxS>8㲓:RYc\OiA"wpzfZCσpNt2[KV/ts#uj>WO[yzCm[ `-6";t՛/~p=}5\,lhPӻ,4U=IG]O`\9lt4ԫ⤟V}z(V?]]5<%y];ni;lu6Ai~|f+.sJ?dkV/u'gTDU*CJPV ,TAdPb @*yFYP%6K+[؏w_m~\]I$h{.Ye+{`_zPD $G-p S)ju,Z[ G$e6Xp$ D%Rr& | OOk+{fq>ڂ)v k]|Եl uKuOzqƱ[uMd_8-XնZc>ߙMa@C0,j]@D HB29*-eBe=iN %33h"l =Q> EtNa6vlP} XW8QqgVV~=Ycl݋JqCp]=܍GmO"b& KڞJlwT/T{sg ث2ED-$G%U0*8,HNE[ 1{m:ښR괵) z0Z$2L<&-<>3v&~VvBw/.%*6d|Yb*'8'mP`qЯ;S:ZN[$jHg !M&[Tch 5%S-Ȓ=8!pL&n,!'P:#fcw&~FôB1Okwzmκ^`h|\Gz9 6nlaG,Dwt;9ܩ?JU !2I,: 4kmG>LTcؙ8aCG8x*38#Bwb,A!̽yH!kQ$QUq%X 87N="cHƨB5j EJϥހd=bgu XWs:XggZr_E~gKDg6e@49.8zyE˿ e`Zr{;ӎC\2~s_]=śf |so|uzJ_7Z'̵bKdF$=>" Lُ[nx678c]%7a{:H{A탬dXI!Ur^giI{XN/3=Jt&uSjldq7)mFɍTYb`CB t;!^rV _ԑY3sOޤ-i~1Ɣ2纴-#-QBKfHR{0=2.Z\E +0)|\Fަ.x'y|tdG0dv3y Ù]Y}"?VmoO:0*x:YcyƏBn[Z~FβǼ-S4HVZX Q{m"-1 ۤ 0"0m-4U5eMt4YʂdF#7FDƂ2['Utd*`{-${=6 EDZ)wJX1&r':g K1*͝ϖur.0iUB!BL?wx/4=ʐu$b!$Z ɒ4HUK9B$͠5lHA{зg҂W) z>K7&$#Vh=C-U`s#R {?? 4]gl͡_ !nlXB[%Ǧ!f yy'4*PU=Խ_{jX\ib./5}@<)axɷSVO]jDJPq` F "^ƛ0o.pſV34+GW_W&2cq qMH?hrv2 SP]~7d>K4_-~xr i r:/Zѐ7ݚ"f$&֊?ywmo{m^0o fWaRK w?+Rwiy~}+ؖ8#Iymôì*2ĄQh,|4-&ӟ9r먜uqFvX{o$u FϣI,]j_W"pQyǨSqҩ&kY^?ͻ_雯o^۷?~o^woyEiKm"l~,8  {вi m?b\->0~\ W~ƣEd֣l_5 *zzifyPewHu4hRxb Ce7|]B=ikZ;9:xOG$7ΓD. ed)H"XP$hKYf)!# Ox8,\^Xϯt8\A$Ak'4C`"MfDaZnS Q%fu:ySt6>}A0vL0t۝߆|5y瓛.I<`9uyRw$yN֗)lP$;Ha[ :Tre T +;PhٜveЧtЌ)% YPh29f`lpXpQ{X &C lKEҠ&m+JX(IJRȆsY2kI| ଵ1 !bȼcؙ8q"yjCP]e0 BKU9XKdl=`ԺM> pƨt# l{1>qx)>X\=|orvG>bJP019Fҧ3>S`%Xx*l@YgJ.Ld;#xcq!crFWtp 9!@f,oءCdD%-M- LaRpMZ3J!2.=PKo2o2؞33cWіv*2ZM}Ztj[7\֓Xr;Ɓ+e.l~ y<*URfBBVi-evI8DЛ횜%&{ F5ͧe^>QN5qTZF+ICv'\i~fOb-QPnI~ae]#F!E#ϧ;h\nJ@ B6Don0 yΦkN[5D;Qfg)Q-8bTki'P(la*e=* SzDۼԭƾDavw]z`܆}s 0mhVԺUzw4zO6{-l:\T}|Aw>=<畖a2oquwv=?Fuʚy;F͏\c瓻ޑT/>6osԦZo%_8Qr xXÜ_ <Ӌv_r^ҏ7TbeKFk)c'vA,NiDŽz9ʱ+Q@Icq_iNsg$K8!x(;8FIPFqTsIL BFKyuTncqG+tQR; Ip<' `() N/LFD]?sTo`:V0%˭vhKnjq[M7LL9KP[5irBWOTqWeb9ȪV )w.~B*~jOS0 - fr5(F(g2;hAҖք.gϛd+~⧧S9Ԇ{ƌ9rƬ2Y  N:d"L - ft:5dQI2;a: 6%*:3qj Zn,I!R6D9ĵSIg;4FQ4ު481qS\kxgmaGkwmKf~T@>&.H.~J%R!i>%CrFphpztUM94*KMrkW̴:=}3_P(zѣ S m#Ly%4&N=sIxbn) ƲinO*G^pҥA w^T@xۗ nFe4ք!G}}q8nHYClk jfKzH2%rzpUl9d_7l[w&W$_Sn?Z1Bg8\+jZ!ΩV=[T(@/2rH+u\,׳ZgQt|wLmȃ&Qd Ɣ +aRw҅ 4q4GcM;O} ڨ4@FE똄AAd] =ODi]/}xhp*򌺏OqopcH+JU֑%m 2x#u:ii9iiףM7i|\1(v!\- 3RؗsKsL&iV|'91H( J\=>b7zy4dg>ˈLj(vTډZtN 1{NH]a@tr{$="Xz`80xAC=*w#D,ZsRAS*}-UN{x7ypK~'Q]m(v(AgLF٢;AȓN f,^-OD<h<=ly^dD `d&w^XYhBp},IQ#dX1 ;Dlp:;!fSa{!$)±@olַgE] sv[ ǡ8C/Stdw_6p KMx󓾽6|3FhRA>yn,:T Y}`|b< 2Z奒$ཆD5!!#/apsq"E,_\\h|ƓeV-D}R. 
k)ZjUK{eRK>|oOȩ])1Zgyگ7d;6~ǣAWnJ}u9Zn k:u᪔ʓ+W]^O>at[$5Rwzz岾-/CCvqj֢YW>nܹTIJWnofE2jh; M=پ>}v7aA-aԣ|t -8l׌y}FGm#W]և ػ˲7]|&[:S^}Mjkoz{o vop]+mLW`귕>xSgq$Zcɑmj<[/w}AuJC"Gnb܍?ϧӱ#4]0wnx@[n!>+uc2&s޸[ ..i6e4#Y} oHA ˌv2SNШ3OB%7X2C%b }6]7ꛆ^A5 OÎ ܊HR"yH*,4 U!( WR/h/l=VX qمK!vϳao|q דE7U+ l86%*\uQsnRHmL粥 Y,HgA+#$K&d@,B1<Ť&4[`6rcAOGdX>`)dnm̜mk\~ۻ֞IT I'ϥOO OI݄jwZ:03u'W m@_ ~ r@3`Xd\ LZr{0S S  ׎E0NF)[٠5UE#y8NjG#<9[jZZ&Hy^ysZLȨ RE`m.ǸJЋc;<gu`/ZyvbA"vX䡰ۣs֐'>7˥H`eX.9Yy)\8>%XJGμL5GPL3$W-AsW4RYB8.T!kXlQURɷ&_xZXjrJ<(iLF*Q q-uAXU X'o#wli/9 T맜rM^샤m9S:iZ4w:L0їe?/!a|O!0pFԠfAZK]nȵ]6ej.ݎKjD= 剆TDȢ?=.[Pf<`h=|2s4Ѡ ]Vbr?aώ+t4D+!#9~61b@ŜU+VW:zYhqq\,|6JJ+p3\>g}ҳ:4:yDk4euR[Y‵E ߺpE8VuVX9ɓ9%!C+HAVX(\EG'[O@Yt+ p0kc $uv^ ic i3Dl\RV^<<˞3 zk <XOӫ6, '*to-B0E 1t(HBQHl&[˽yf*OJp,GB Op,r$8,G0}> LNk-Uvkx7f3I?h4jGW:; 1E6䙸r!֠PqjIsәbd])N-"o{qjRa_Spٿ0Y8: \ͷ|54ZH0;0A;&~a0MgRDi=JtҾsV{/Q ѥ=dd30]CPEv.Viʜ9`3pU5Ҩmb \@FvZšpU}<D!*8 \kj;\+ pˮLn]d~f\煫ݬ5IN$Q\sZZhp^<#CVy$8 rCՔ)e:im/*4چ ra|V>Wy\28yškm"GL>Ș Z,!95ԞwNy`ࣱ&ϴxh)xki3QhsMV;o|L퍙2Sc@z%b Ei^^щ }֤ڮsE |:a,/.=s\jG.#`1o;z)( 7 _+9GK*ř)Ys;GuP:r=0F#yFrtI 24љlBm9jL1s^vEH58EO;t*mnm,%8'ߞڧ4\ʃ"/>U[s(|8ނQ^~62y[?.맼 !׃^( o~Δ~stXʶ')Gz^4ME{^4ME{^4ME{>wd)9Ϳ_̿2W)F{j9Ѹ|&rVw0bsy( >is}G@'˖!S}S2 {~!Đs!qLht^R'DwG_!Kp7R wspBGD/Ej5=3$EJžvTUA40ON6,|QH8Z#>j$rqϹɜiSO=_=#wH|Pcs,romof4C$~r}3G>l5*fЕ8+H6APu~4fIU 1!VJݞ6xsy\*Wm6:xĎzWn'lRsA$|>Ђ3LF@ ((qaFz~;E vfs`""KT)IiOD*µɂ@D$Z`>;u0y&6bv*]ލ0Vmzr]˱*<xJ+Bhr0JnukN;Sgt|֭32pN&d!` $gRQ}d.:-dDT8h5иdLP!gcBq hTv g#g>lEOE#C5"-i{4i h$$GȃNh6QXfT4 8cPT#X@&)Z> ED DŽNScG+WQ/\NYKՋ^d^6ϙR)G(6Q$71QMu`E#)zzTa18T–M6Ugp{U:cSwԊm(ҰW~\J*x7V^_^9/*Ts<&RښH >8a+R*E'A&)ͯ%AJV%[Sȩ%d9祎"sF[KV e |#C<Qa"Z2dXnk"K"AEo3o$oO컇%ޖ6@ۛ0cp|C^3Zv\JWV}^3^v*aqs'B<^HDhJS*)s)oFjc<y+$!RäSw5V'OVьFqF y̖=r,iߞtj{mJbp8TYaHG).R$e73  Q)jcI&.Dc :dJP#r Cva!Vpv!+G<a eVG9 <%'SgdFM{a9Lz9FǃdI& G! w`tDvEԞl]&9hu 촯2 MgtVAN͐gsW:Gzuc.l*O$gP)|%8K1V h!:B7}Uߨ:57A+v$7AGĀj)AA9r@Bs/9!nРMʀ!Fr[$)N$J")!d`$POAs'15aZi+_ݠ5tOՇ]v>ͥ*xͧN)mcCWz:rood@%V. }$J^vi)/'Q~<aEX^QZK-U"*$ +{">u0C̤%gv!HiPS+"BP2@B 6螮;ͻ/6h$!͜qAw 7.&궞Ӱؠr F6'QEp*:Ґl%DG PH5>!zygQ9hOf3W-_̊yŌ}rGt4f8B>"P 9Э?+7k#"$d-#uH0C6z}6!c!ˑ9Bõ|W+D;Cd0y _ingk,\٫u3L\ xzMdɎlsrDn%6Սf xB[»+y|w޽}x\qc=}߾̀xgw.j?}4ͻ1ʝa:loqu3{U];'ʹqGΠ{[xŭlyIo S){o xh0UP@ U{J˞r>=8ͨwkTZjRKfL.1* NP\< 0/hU#^*^렗zo> q>_-May?ج6~TH7\]ZOP"$s4R %I*K"0I9$ABLH\F:"0RD3ęxc?YrP:^9xFun5Guaұ)<>㕛vXs^i-Sf[6K~ۮdZ*n.^ di·7,DuF.Hh,KZ:1;*o [ս/VLC&z%'+o$"Q,rA$6hrkѠnd֑Gj䕑^e.K*R%Vy }ֳА:V)C?fC U<[r5i .CtMbCTQg)poGH<a U5<=mWz8'כ:g"mtܼnƣhy$qpw&.ow'DhҴo"7wxr7˴c{Z-^~x]{ ) 3_֮tt-Yzaֲ\7_B{={-{OƑ_k p1 EQ)Ig3Sx;o$/  "DγqvA矽&~я?&i>y^۔ 3;0m?W&6T$Yk" 3,c;/xla8Z힩xO~W?Z(ڜ-rm|̓׫}v#6jMFA&K,M&oUgz۔v :n ⧱m-!LH?T\iW\`iEyQ!iS߱Fqy"rXhq^/Pt\jnQ*3[OQr9 udDK>i͂ԁ qȜS}@ 58@@L uPv‡T^*k %d &9P},5?"D%^-hv[ ! Cu'mnhop:~{ [`≀뗮v u=n2J ͱ 戳LS,VRIb(8hJň3cGSwa㻔f2="G bYfdA>/tQ#50ޯ43|\=G#7GaBFk?gG"ga:; +w Yrr8/wCeK bEgG!t8{3K>:j@feq:>R@7J`&0 ݁^Ɲ&W!|ƴm0.lύRuh+3PvO^ͨڊeT *"L4ek9;`jebaw3\Wf:21$;C!K志P vӨƀ_.ödcw^eMkׄ JzUEno)iQLy5vD jok%k,9vՓgohw=[cv,]o8ކ&3⬷|>{S | X ^x"/KXC믠G64{˫~_K^Gp | [:fgb׿ { Ng e(>!R}1:NE=I˃YdxY̨͆<`q6-~ג/)qKg+&=$KV/_UL_ d- c{&Z޼Zg"ʓ U-JǮ:wv[8٬B@j̗-ˉ.3kU߰& 0dw] P6kqBŘzfHCH.#h@ŠTcS. ܛA ڒD ^trHDN;a)]&ZB.H-xnbti g25c]v^@9(}>ܯ5mkqr܌mz6)s~^SƒU\b]\n(KHܔƛ[JQ2{`l>rT3Trb0tɺ(s%N0Urb)'rbXS/S) Bzq%]qo8Oޔ gѵOOӿ>tۂ8+$@  OAsr. 
N=aծ}Xeg2_",ut{ujѭWYu G~)Z!{ Φ9Loft (b 0J"1o V(UR%CF0ɍuǏtq& '\lZn$vF,B %-,gN> ̚4"?Hc yb„#H[֎Y6ZB%mn]^okƅ6u\/5ZzmE-"jm9]ci?gdEN;&"A X_4khom=_{uqEH_Hl01hVc婎`h)v iX9a O#7V O#KV7KGE6R.4$aਲz +ֈkQ1 ;oQZ6H '.0磈MtIQ)*ָyd *'tfث74D`lWҞ؀@*DI_*+^{1%>|aT}/L*;BikI"+MZvC pG8Mb9BJaD9QsC7aYgV ЕK3[T1j,OR *$;iBy 挰k B=}V8͜DZhO'M+QLR5"ͼ:J1 sݙ`x S[.8z;NrN/i\N\)'pf  w΅+~8 q>< WV.\ v}Q&JDLR"qbL'F-f eq|QYW3E^=N!)Z5N$zO4A''Eώ)"O/0G3YJu!nq!#x ?"ʚY,a(Lڤ2Ł*k#8(kId8jAk@5@0qNg9uAq\Dzj-Z5jEz8쌏/$?Uo=;t1t`zzu* (0"- %Zitr5v mC* je6x8V,RMFFo]4}T!(Ɩbع*Űþg6b:)6uYh.?fm̸JQ#>{'~֍5Xy)5*e&5֨M)DGSIH"&J`dH WQ]QTsЬ(?)r2w 8WlO/泻;H QøC57o*J':ԽݠNV͝Ok0z 1?Lë4f%b&HK'O-܀QK4IЃ|a>0JVxPiD#jҽ Tom*tbft*-?i *~x W;n5 ZT%h0e1eJ/Q_~n +!NwMyW.L^4Զ\wP`[`@5?%K͕iR"|A8 X lME9U|挹Q Г>??Ί${0[,<&l,9B#%ſ×.XR,yvjTD{\37_O'qԳ!{Jdt2VF>KFHc5FLJfd;Γ9J,8( Ԧ) C۶O'2'!\RIms'bckC\}84ځ"BJOUbu5W٠j0ݨ&'(L_ T붞l!j1"R /$ Ú$󤽝 ҧOuVsv|eGz뫏Y!:[ փ !]k~'gRT̲cK81'kX0g1ٻmW4p&A\5v&v4 /FTQf $jT@rX<.g|3 j D÷f"BP/d 0y`rg{x-'FK eӻ/svr_ mP6cZxLZ!IfrYe,dWsHaux™/,t@t򯏓)^s%ŀ &my)0֪.}!{H,0E$˃}2cq5{T5X}.JK) ֻASe:Wqd9dXV :yS^Nw@g&#ߋNƷq+hMQxO~K8m8R:;ou _]|߿z/%WW77oO$uv&L?J3Vn;ΛWn>zǓN~^{׃2w;Oo_8Lwכ,Ŋ|DO^Nw~=E'E PP 2_d?'Vx^ i j2ǻS?i8g<y xI=Hǽr3kD)'qIQG$%ETG -=3I[UeF_w/'/Nsuzs7vrs۹g|srǛ9/oOR%⪯GUٝKWEAVŲQ"L2JPp[Q@Z^ Q]JN! ?4tTi1巡ؾ|K{EL㉋Ɨ)Rº9R Oc5sZaK&/؛ٝXc`sd)=%BER"X`y3 -B]B&.n2]U;3T i:mɏdT,~kOSZeX qMrbw4ϗÇ7 ˚WS`^ RGƌe%R5ܧ7/_|zӟMmқ_$jQZ[%θV 0-9 PHre"ڸ 78\)WW493`im.rܥv,x75o?i0-B0f"mj妥2+Mbn 08gB!9CGTzE2=$ }b!ǥr?.QXL*X 5N`l]2IJ%½+92oP+%D<{P=;CiҙWj1XU1tJ= VR0Q9frqJ!x :a>]%O\8OĚsr䂣cD3᭼~}eCiӈ^$CiyBR!w< >hJ9A*6oY_{,ݦg;u_'?&ewiSXӃ @mX>/t K'PK@Q2N.ɿ{>^)CσKń;€ SA>ӗ9db X'ؼpG(ӝ%i W":?fݸm$ (sh@bD@\RАFH!&l ֭(tT;X72]D1V֭ y*6D==x ͻ1O-nM; tt;BYI_Py%L ٪\ð>}ZSj_nEg'i%0֟>%׌A%D܃E9CUNX}k&j̫<5Zz@35乻`3nd+OIm﨎<ёԱ]3[+LYϓ:< iE9M&d}-\į&եsW+1<6SԠ|E-nT&؍D~>GByQNB#u4T]@еF򔑦()}AOD0isX]F{GNѪjܒeV }nGWI`Zk;Ĩ+ 9Aih$[3e\5 Zih" 57tVB 4B@dږw4c]:j)@m5%CyJxm^H [@ M^SۦMZ!g]af4JvNqAg7$P%f|QMOo>sa #uMf*/ֳ*/ɏ.:O[wjK֧,Q%x[<ѿrUkj!:lZ!Zdue6I:[%l1+]|uƅ#TnzSm %Bhæ]'2ea1XܶH:O 'r#l4v7cnZk$`_-ǩ4)k^HvػMccdU;DyfOO@>Ϟ]agn2+4<rALI(Ǣ $īn25nqXcܚ׀.s M!:A#Vx3{inUL|/:n 'tapnƚBUW)J>Wڸ LF>w0E W[ik.f2jp1kD'd뺎3v ITfU'Q!1d@ !q1Mȭ"Ը7`\s$Cx7ku5 49|_iqnNZ9s!rIW8+Lηog +C`H{UfDjy[2ُ u\f,St? S8f9e)O䵜&aԬZݘHVXBv|Z,~_9V1-`V-xkJDeID\ss 1|r N-P#RpZajv5 K݁J@stJA8_G.< Y214WʮsqҲ-$'yXglTUQ)#\#2ׁv8bR.`jvG# *:!uȝ'LsPi9/L_)s5د6Q ڮLɻg\޼3%3XAU n̍PƬc v]`25<^)Bpyæ榹=Tj{&vH6'S]n0pqLikL|&l -$:97;]Nj(Qd UQZ܎ra#fpbE l۫'1N ֝n9;Yd-ۜwE$nIm4ؾ~uv~q~sn8mP϶L*51vIqZ ]JF ڝNIfx޽9;)*;F"ex(a‡L<01 ZRǷلV9XoHJ>ziPC3*M9. }r{(s}ȇCE#I;NoRKf:m ?m[SڞSUS>~gޅW'13oB_L <dN lI΋Jd/N 8ϓiw-&1$ e28θS{,bd(9#Ynj;1n .~ E/.|< hhsisai=Ly3+84~ wV[USyE NrQB(7[#qn\vo9!.ntղ!{vZXt8P==luG~g-~2H!WEn+CPaV7Mnw+k562%4VWrnE0ax/ I)[Oj*BnuF6sT$B r ,oA.H)< sې(g|b%GkahbTeII-V|칱Tl:ZjJ;Vh3ݛ!8еzM>sH&m~W-cFu2ǵg<]kZ3\t{Ux ztWS6GeS8{PD?䀚’=V.Bo{I,;dS^i;v*#)Zv,n';k%ܒ0On\s+R0st[strM[DJݦzmJ;KxQUʓZkzo3r~KS5jN h~6$nќ ZQ/XE3De9y"1qEi9bNԩNԦ!ZY#dvNvĎԟըKuin}yi{?L%g=_#+hCM56ݨ/wھٜG-Fvg^ psUjN>VKM%-iø?2a#)>JJF `xWSۆ?tBfߥ):?y܋m-[h7VPxb4XkG_CR;Z藒o΢f&)+< М'㺓 o?iJy rk89t_Qz4ĒmHwJi)O,NӚbl{( @;ξ}2GMYqD?iFv&*svlCGr.ăMx\

  • ̉(4w_yhiv#VS}wn O#+#QIz=>X u٥aJ ]} m^=awFl|C' Ӊ8i~pէr++*io &69Y#_tcu>l wj̴` yЁ`3"dȘ"Lr;ڥ;}GM?mթhdӍR`Gh85 GjGh}ҌR "<軃NCM>x1G֌qx7؉nל@'fEdq 6/:v~cָt׃G _/훷:tC럏'?AYk/N~қomW=,ul45\z:ϋF_^||e>J"axE/;㾴S5a첞q,5O݋θ8eW0tQܛs[ޟ)C8N_^Q1@4H0N )+zxT2㌆eL6Ebaz?1eʽ*d48s4ɳFIތC~Eh|j0edzGeﲖC3k[0]~d7ZlFJU7/f(m("[/܅Nކ/@5NYHhdtsw!4R($qvfƗfhl9hWEI_'I磟O`~}YCn/pilL˕ A&f+O%׬} g3_@SG3x QLxdQ$IL:&LXL0]Yp STJ0~ ̰PL7Z D9h>!\M)xA]fi A# ?s6`/ X+7/5=?zAjriZ·Co1jfvPIY਎`g\EKs$d 8H9օȍT0yK5XHEKm5*: ̩Q<QDIې-VRR~Kct|$Z#"^"F),xZDi>QDv{l1x=g;: Fm6~(vO $rZ%_^#_< >/ta׸O;.^{÷N0#Áq3 r9mjebo&h۴4CXϼv9 <`'>Xgh52SE6{_N2JG=3+3,hS\ Ѓ2[*=.2렧Uo D 5ޙH8#w" bB9wY+Vjz=57~.. vѳfXnA5::i,3@cAs W(H9B遟*rF?I|D@pPAh %M"B)24E2ɘ:3L'"V:m<&*2SIpap6"@K5Bӹ7Bx-О _$d /N⹧M\SU`BWr#F,wgə0 =[|2__[Bgo^\4RΞo/(IۼY "2M *[I{R~~#/Fiox5w!fB)0z{ #)RGaOGW7)T =_)S|C]aJR&ͅ!z 4H'+wI1$[X LIv)p7[ˤNȢDRwU1VҞYo Noj)Z7Kꤘ!,VSvXJx [;-x $L0#p$' M)k@E5Vi[L3@qޔNF!*He+g\~5~*-o[˱Tx]#C|[ Mқsq3,Q9B|FԨa,*2ɽF < [r f̃ǿnn+G9aZ‹3TF(*c3D\14i N%V9r\dߐÑp' 'у[la/ZZ\F HtXr4Ȱ۠2)Ea0H"㜃Gld\` Z:Wjr &M;p6VKRG#QD1bqUQp.5H,8P`E7s4XejB a^Sp!LRu_'7֧S4 Pg9D-Ct6^%djFdi H-'{;5䯃à|"Y9ؗ9봒AN{H6a&Vm/淤a8`GzA bj*"BPp0Vn0tQ"aG&#IB_{ŀpXؾYĈ_$f)RGRq&)r8=+cÙo e( Z)<` q7&.7( >RRsΥvXzjVpԒnEB 6 /jwJQbOL$znHA0&hQ ˴pC<-WH3XuEV%Z$i-*y[ "G5b'盠="劉GT+[tn#{H88.;95cA0;&<Fȁ10V8| #.;QB$D%1x ^3ky"oνsqH5O[UN/B^ ̕A69]  N+n C'tKnjhY୤>ƂHq\k7XɆ e%J٥Ҫr1Ec8LeSLH Iumh9^୸hR28hprHøº@Z7)ҮVɝ7ݷ΅(J7[-5yGN^"FL%hdc;DmLihə܊+W͸)=1o)ZR,fpH\FSR $ ^+n sP!_. >Ҭ HmgǏ5)& "X^*m7'R l>aᾼ5QOJŪi6e$Qf~_GRxȁZi8{+D ibUΥ6v6ʦﮯiTUD ϙ@0ezpǢrRF+(1Vif7Nd K(u @Ւ]).Kv)؁@.%JNaje,mX-cMȅʮ$bOiKr"%W48o(:A~yx|267W?m KY~scuDq!|\<<ndƃ}`4$`Xẋ_Q 54 Hv(җ'4)Pw+t[!% YxСƼHvV%1Wl iuXOT 7"GSoP`x)F]ւMK"XNH|Ԋc{aux;f<1,Ob=-aCM溔Y(s{6g1EEg&* DJ,!|tT+xג\RhpI j$׈_q@D(ԊP|)KK'8đ.ffL3;J7)*J?WF4h+Cv[mg̲y2囟3@.S9N&fIXxYCNd/v=Ү'gnR@^2O7=_Ph6HE{gof_'=b:ʏەY ﴋ4dRh}?rKrD2vhp򨻳r`;aT.[ Z?w 8\v\:]x{k5ҢtYvFŖ%硼֞]]O~ =JՙiuA5]K$FV=$N^NQQM5 N@oBvy4>|keӳVup9?Y8)co}3ݚUX:;=gl9fs>QL3 m4E Lg+b/u) G+]l Gk>Xt=Oi}>Y-F|Z֝X.Y^],<'F輴U=K_@w{2BNu\Rj@zyAFCU ӭw:ńƘt  NQ]g[%@rO<)~,%+%8w1e@X0VA[Lr$8hSwrШVSHM PwBkʰ(bQB3e]X``zN A9b,S5a˹obZ`.D\1޶{Ԅv6/.B47CxYT>MmuYeoWzDFQ";/[ Jsi9s-N(:GTz{H*^0#$ yZRE])qQʟ>ǿJCwˇ]8tD:#Dzϵţ Bǿ/';4g-4[ PMof>D!Bga>NbOB^ IzL#F 8l^j&E3U88|x:s /L/n&/?%3@n RaM%]Tl}\A$w ̦iWTW[t7pHkmuby#b`kB[?[lpGC$M\u SEVKF^xddǝrJEM͛%1c&WF[OΦc_Naa"mb |ό60Ŷz z"ؔ,`{^3'اKԉ}{Iä0%hp(oQ ϸ`cpY'lޗ֣_E] hWfۉjPWGV[_On)o)Hv)QT"=Šv[>T 8]9䧆֝%?o˰j35Wpq,,+>D/Ș"SDy7={wX<]_o$3N1FS3sqqw=ގ&1/n ݠ&,!a&5Xsg VAk)Y.d,x)IBgsTȷن"d}EDE!庒 sxʥ61N|$e&w2T.1/D![tZA'K}]Ċe13$j$s2?@W}}=:ڇ+ÏO?!K H#+_< 0p ""q] F_ h6_x<\8[ n/sݿ875P?=N&p}J6Ђj RoC%9GSy5jnp:ʖ-GZ%r3Gf.\ے;EkIƄ20/8B"z! -1&,BcmʃQ*p$qU& @2!eL{\rM,ɧ|L1cV+ A38H\#ȧB]i] -Q{06ۙ܂/N ZG p-R&6IJ).=ew3fN6AM T r\Uэ J)Uutd)1/S}G=<J#cjIɂ*B񊈣Tb(D0& DV/,-7U&`Ha E;q[mm0e2!i 5 $A*O446AX|^cBjHE]h a]c[uS!Qfl %&%oӡ{+FJ7R~YXN|H-l5¨9yqΐcQ[F4W{I#ӝ_D{sFAk5EzŇ(җO}}yGzzLu^]HF^So8/TOCZ%=Y]ݻhIjAzAtb0=F쨓F j“Zv[іDs}08eP(ltSpGydoO^K_MLp"&x$D]涤%ZOu;3JiXL^<x#3m!U`Z\oY-N1oZ:kTg\KJ 1ZwYCƴgڣK~!k  Vj2zBnn%}*N0- V&}뷦b ¼Dɒe#edO({l2+ۤ<$Wmz ILu4Ij,*Dm`P'*veOzz|fϸh%aϕ7I'{50&)9 #q^.J]1exku6ϯ)= ר*{"~KV5"Y4BĹ4"Zfz|qVa{P {N5D !f+1oivY(`,+櫼s:bi ")~6'֊"lm~t%e7h[= 4MFOHfӫV#ǝ >Xy`\p*U@>Jk7P^Vcq0q@ 跣7\c5-6uwUs:Z侎w*ngK;Gq^o*Q 6J>ş4_=e9v$!yPMsQ][`醱T޿1 iV{r-,!s,?15=AQ Pc2mD<=SvKdT 1GDCmr4gL9T&Q>7bo( aDDWxC59`?{ܶ K/9'~QR:'9jȌ)Q!)%ΩIeٲHg2$/G82(P Ex}gd4\EQ)rw`\ͬrI̐X$VDN̜YL$@8p#)`‘TiΌ,l4X,qhq[7 8_ԃ[]Kʀ#3H`;m\ױ ^q4AT~|\B%d:CjjK_a2Ku3JKGѸb('AQb)tsE,UF6g1&L)9қ *Qʃ:ǷK-rҌ(gC9ldRU{u 씐%&C;*rbPy6NT})cAf[^jCVP`Orc3) ͐IMEp{ky ۔LR2GKxP[}WuCNH-%. 
+xY_0TP\ e}JADhY_*%`NsXwЪc@-v;re6RQߙn)i!d \jR?S&_M=ll,[xѠuUDz0_ȚLXqbTJP rm-5hTQgLU@`!27ƚ!ssp攔Z5JrI#MZ)٩70yÕ(B|Q}ȇJ׫VBfo$=KH ZOe$<%-ctrw핗ӫg.BAQWy  Rm ~GY6bUC0bO d[3Y( Nzl*ŬJThĦQLnۓQN̗h Y,gs7OgK;ϰcvgMgVYEɮ6W xɖ.bK)ndH A@oː3ќ=Oo`.ݷf5m龝vZI.bKaVM_j6bf<=gs/tX ]W*¨ԟz[un->X`~*(3\{l3\%s￙ vOnhu%[snա}]<\#(1t$SŷWa!np~m2~UE}md+"+YLQL8;@} dQV=c;l]>{A[Qw'Ъƞ`;l f0IL1REPj)b")UBHʅ5iNJ1;:STKf9ʸJ["[)&'q$v 5u6U51RE&GYEA,1Д8T155 xb38H'QriT T2d͗ӈYmE*B ?_d A|q u xI]ۥ"TT:!*%hAG$U0RQ"6cpV1I W(\g&3a,"*Tw-ZoO@eWVe)^2L7M#7(]Z(D⻇e]2;>Vk³KL)+9>%aRČS7TM1Ъ$Z b\QY]K NT3z)ݛjce0ݻ)Kd}u}MoWi/rdx ziKؒ+Kuj*"v'nݢV7FwEI_g)łr%(6IȆ yrY(樃̑h].k h (:4} L2b%郭xxCu$o, >B9S%JcI0H\v'=S$eS*|Y KOdU4Mi>P7L}*W,1Tb?J)YA鶌vLOZ;[Wvh rϳSRO;MrVϺ}lѩˍFWg+|vU`u ~φ;E@h |R*&kbXd\!Lc?tƁyZ m͜(% "Rg*>8Y)XMnA .X91IB^Ƹ5ti%9v DtbǨ-""[nu[E4JJZnj<Pb":cTn _OY)iꐐW.Q2EdUA.hX BD'v*p>RD->K1P!!\DdJcKrd|4'M 3R#~diNɞq&}Ƒ¨Zw #ՒΌKg"R !D>M~RÃ"LOɖsr4XliD/"d̰'1T[űaTkl˻Fpj2(UsL0 NHs@RvSrZZ{$32ЮVu^Wk4!]btqE[=럆ITES.hĄ-ctx,z ,(@\˓/˯Ӝby|@_>'_ɗ_L`sQR=O~뵃7Ӎظ‹ӛ3^zz~4{U<~`&.bO/fC_SP1fS1 d'녰`g1imo8h%㤙ţI2wO?"Sps shS*$%!`a&O~#dMt$ǂXԧ1%wj_ݼ-F65~v=E<\d\lY0.q in8ys!7~3n }jx'ç^%izckMfw` f!u MmE]"ێ<7= ISHpl&Z]N&q /A–1}к8:'}shisJ"›E 77~e̮ٓFp'nan ҫ#?lA -[8UK0;xQ8խO(HQ=V@TKѹVu\hͼͷ+Η>}riE>e'>7xf<9J L@㽙-;ˋMXJNՕv!‚Wu ]e%X׾[՞´[\ZW;M}?eT-ᔦ53{ق;iNvA"x%m) wP+)sJ2Dw-%Uexդu V-^(P^?E5afO~*+Ś:P! 'O.I3zo"dxlpSVԦx_lJ2k(k<_]rfb'e(XFy<8. S .%Ep*79#Lg +x|6}8}L<%탈 DyPST.yaE9D+ R.#vgͭM K $sO~[DB=A?sϿ&O\ ^kV{tѭ e nqk4Bi2J/{~ }*9;:`7X~fvOXfLntZC;Ih O+wZys:@:f܁# ]\ܠ~n0NJ<Όoiդow`N)_~lؙaq{[OfWpK1- foPcwQdk6{g?ǀV"b}V]\Ѻ.>*ƛV+Vmu.n&%>R7G%j|zl>KL>hD.qʥ5sᦜ$2H:IHSD3$TixO)Neٜe)5T`RYx(r Q{o9a^D(h˰}7_cf" FF-c78EIh<ѧa HYDh?W 9޲u)}},'8 mL;x}9oԥ\{̥9:a;MCڹ ^ijrr1 mQJ3_1QɎa΍Թ<Æ:k!Ҫ\f9k0eT8KX$pG\4 8 [SҾUouϊAʀo/XNM_13DvE҃Z~Ỵs,#7!+r e`bP5GG24N(ɭI1,2!Ƥ"$iҿs7NL`l6ݎLz0L,mI|.\juJMǀ"QIFhn$͖M\a //.0`T`.R`:[f9&piFRK(3sw6}C])sb ]34ZPcf(e%k[_#S#̧B*Irzhʰv2c9a1$a+Yw Hq 5Õ0jAq*R삱7}f#}_MB8fW49Iw_M +K=__vNL6@2`ܻ[>.O_xsKh<\8){] xlpp'pm 1B$Xtϟ]c%4VMCBo ӆbاS?=U*W rWep~ɵfV㽘`/^o8Lc?f|L3xuvdF~z1?iSb6vOn!Y| !|Y Xx?x{1>Pzhx ug*uT'"(OcI"V۴Z0 r&<,aҦ l`M;fBʍrE+!-PHwxo.?\۴WCAoPci-t5G4k\ᶛWR»J#Q _݁ ,j2iWBIZl15#ٻ޶,W4fiIYtt|zȲ[t-IwQ$%Jf#Esn{ nF!?oƪ4 ZS%kA[!zNNU˟W4RFT׽ ] P%wWDtIM926)3 7l0 Qփjgw֢5E `0~/݄>NշrBOgh:aV .r4i@Q=* I@ \"=k9"B/ZMz (>S~s荖gbU W-|L.l7-izw??YPI) 9I,{))U+0V.9{hMPEILsLͭ/[P]_n)G"E?'o1e` e,2C͙D20 Ӝaz}OID "Odir㩳$AUY𒨙LY9E;^Fgx2 Th*&FJD4% MސKnY 3QWFŔ oARֶtNv/)fɐ'!cL)33C&`)C)>5h" @Il%@gEn^YΝ-Z:=p 0y5]:,7H{+C m5^u (7eն!/ $qNΗة?]$7L9%nȜp!uEykvtA9t hsqٚ݃' TeWP'6~cUbk9~R],uy:-A{rN2vwaꞌGJsA} 75Vy,cm.j; \Yj΃JN`W}"5 ɅdӶNeCPG<076JR3qW~޴(tm=&cn&ezWS#]~, Ѻ; /Vl8r44b}}D[[Aċp9k۶3嗣Cln>})nݑ3җV9|9š>^kQ mmdz]"Xw&Z*zݯ#Rꦷvp2cD қNfiw}rWH3M,sk Xd/O0}W{wj!"G8LXDb;%e`Y}Y 輥Lr*v Jm*["z) )Q r+h)nm!k LA0#8v4ea[= ĵ#fD |XC] z1 fbY >xK\ j gP)YNI};M83x/&=̽)WapP"\F  Ȕ4g[vҲ@ Bc)m}ρcAQH8Y{Q K1ޟwU'8L K]' ծ_7o(a][ pV 9rt.G1<0zEWKE4S4\̺@q%rp&x-1 !n<鸾OJ8Of[tބ~8\_v=#G?zq;<\]atID70Ƿ>qr{5^\>MAobz+7Zϡ@K<SC+sv]SS2Aæ|zQ svsx9`.ߴY O7:;KYz\"8g%Oq)w2ftP"j(KH Ex F@,vD[0Dk`1wf@0s ű:!v3$vϕ0?SDh+v{`'atS)iiM(!hț]'kcDgLV8zɤwQ;$^yƄʙR.WFQJrT(mgdS|J; VG64E@t!q\XK,X ChM-#NS+  R+v8@ N\Q@%`m 1BCwtNJk`JXB|Nw:Wxq]]w! hD%O(BVi| A𒇀d57 ŝBD=&w 5aHR4W~oiHp`bcp@rZyB|˝QZYZ䘡 ]~DZ4!uȃAk\fiFR}x)]d=/;MKONxήE="0z~哟yxQ u|*]Tp Tnw/ߏ(O9[ 7m($ʊ9v-dj/~4ɞ[h};ͤV,1 b)URPcprA5TT b1i)a_"3b5Hig.ZNr-N@;.Zx!xeɻ$g@`pT=`E3=s090tHs7@gg''ƐFm-KM[9QKjl}7AM)vII*Wmӛ9ɣrvf^L!5nɧXJ|T~,Rgړ'ex!Lȣͼ*)[ Ӡxaz3xNиy,WjTrpYX_:ث 1BvԹ1n3ɛYOh籨C~p;{|=nuz'W_ՠo揾`^͟:eT tJ [dq)pQܪOip^($kr#,UL.o(]"I 0jf'*z'*xIT+-GU{| amZ*r{ &]0ޏ'Y4iH/߼ Ҡ3Mgښ۸_ae+2. 
4*=V٤KReȢ)ٛM?!)ކ ¡Gpuo@"(Uܼ5}|gJ?rj9mL#s{bE%ΣE1\<3]:ՂÀ=ldWh縖D)?]Ka}ݪEY6c\Ѻ͠%wiܷ}~.pjV2E*}&&QRDu񮳀-`ǯ)u rނn_.ht t[Ä.ga8^1d&><2)[e@+rIwkHZsob|R 2ym"m{Z^ U_x= Q&KW%;+nxŗ %Ri?R}dowGVU/h= Ȭmʭr7 ,+}x#H|8}cYr4Oݽk4`k{kX7s&., FC*2fg63]t?l gg)|::f7W>tx)mM 8>ޅE@ot~&~fa.~uy֥_M2ă[.:|8K[ An`fxJAj6?bl= UJٵ(d-igdb ƭAXTQϨcz& D֛ukU[:\K-b!Ƞ`cX68 ڌ̔mYPgؐU嶥Mm柄6?OTI^ ޫq%C`8k.A?]Ѧ,-`Uދ@]~gកBh9b9|0QsFP[yKvs=De@x,RxkUCc+e-zrџ~ h>95gt`HӌLh#Byy(ѭsTNIeb xѱ7Jt9!kGO4 ~6.`.w~Wp2a7*m-7UIvA{xMq&I0&>ťtRLD.2=LD! +C{L7Mnn^yl+w}^#VӁg\<F]n!ɴرu,sg=9a3vJ?E!*>ViXH@v7+BMyxsλ4/9x}iUil5\6hkK]T`I奈QK؇^*ʡϊũ*gvC AH)R C4 h`{?gggg{AG|VѣAP >רjo] _ R #Uki`܄^^Gpq7A1#D"X_>b'fzƣOكuyo_5Uzhnӏc1'y8ɘ~^q&upn7w '19WW:p9DqȴMCSw7!ӻ]pF={`ʁݘE8ɌR8KLqQ+XX2b@@qE y,0șyfr2u{g&R0ԟ\Ԩ Cn$5&d&Gy|g+Ę+]4 aIQ>mcs1!bF:1jIb,^MYQ<Jxͣ8˹He fmatq);ݶ-,R6z7|c&]wX 7nsLq:WcxWҚ SQ _]oqZsz107W0>}Zԧ72Nڑ9@ g᣹ߝ^&iD.Z0cJeۣ7tPlW7]XrSA>/Nw=(OLD:ݠ6VT%z z VUi;Z!9bCQ׮4FIJPP}&^(6rO,#r5TP%1S-9[!j RT^\!ÍSPYnfMof۴vs3fy c%X.XPިc2҇AݭNᬮǛ=CBwFngf+|-yb>ls! ȕm@(id4Y.x]:5!^s%"#NqT hO2(ɢ1h(ҬMVc"bnPYR1FYyTR;:C:'CeD \\9>)9H1o"e #pbL1y\ɰ rme4;\`5Zrh\ܠ8H$ |#UH=whBf4ƌ tE$'@;CG-.9.EwURDΤ  P~ tnϲZhq]Է;}Zh]]<7F&$NfOq@]( k3EHXKYkqtZ^ ZhM\F8hโmp;}"1<!P*d*@TFEm 04,hme) Ґ@=J͛e27)E(siK[HPaהY4ilŽ@PJ\ad\d"%5Htr$f'L\++RTzuRX[rS͐aNc-8}YIZ?X|dud\=()!CO0T0k]RuH]'q8Y*㌻(MBfP*(܍ށxWdеN62~T[3TFvWYR%׸qtdMEhA4Be0O.#Rp/m řM1xEʻFhā䥍)ƈˁXܭMic̚{}HݓqaVpe̐yP90Cv&9Z&4Ԡr5_%l 5Wݲ*ƍl54MM4m=t Q:3 ) xLebՖ4ЈcMQQuE_TJC/=S+3>ovU`;&!b۞$Ҷ;p2rա:A"-uoLg8q=;?zZ9Z#[{'e%EJ"Œ"Ň(Rv (5,L;AwAO=&Wl[BAGAFo=豚[oDF5r> Evv^6ŷd|ygČKUt>Nqʽ&4Z|mE;EA2:ݪ??²|z v gv_3fs{$?+R J.VJ7{Ps ZG4WR"Z_$͍3SH,t|Z`!d.r7 Hofe%o)C[\y 7#1)4J7gSM8dir+-dwC&t9ՃdE8z{sEMo}6Hzϟ f+ WIqX-KP `Pp,й9D;Je vВђ}N  ֧湔VL? *S.(^ 1*5WF34Zy)_TRth?aɕ/Ȍ8Oೠ^l@]K(ݖxAEhY,(Ѳuec t0BB+׳XF8y DZ$\To>'ئX{V,rU{d]3أtӾԤB cE2ͰWxUDPA[B ;~ƻC>fE3joxgvR)kf$!,5Z0"Uqr|A ބ `-!iZ T[ʌB@bc. !biϔcXӠ `{WK_t;T9,,µaRBT d CЊ2A9,e'&P˄dA[\}&?RȔ_%koӘ.O-%ӳ8&i0J%Vt훶D"""gBAq3f.7bi otkL{xv`jb=ʼnSjR&vexp,d\jߪ~p +  .x"bԐ_5.<ṅ=QjɮZ}bHkFZ@zjD0971DZ9"EE} (żkΨj(L|!n9c*)5<#Ι^)21^%@m24" !E-&':?Z$w)ҚhF*F9R F-gYCt|H3*q5$WFknEdLR$ B8#GEW )~܅ .l8waù78/32J\b 2 V#+eD` jtpq@Iǩ- 3i:UgOl8^s/3Bw˟͵"_lb{. S }1~ka^Xl*9bG2ԭLEbTu\U(jj]ϦŝBXӐ /{YS*g(MN(2@^/ sh)E*'TAH%RCd qF"J3ns!E䒗RL#啔'Z9+gc0 ˬ6:c{F{8JRP;*`E+~?F&G\Tp|ʃ+o!boQda0zغ0NC11P`{moǙzn[~I ,U߽_A-:ƒ07vf?y`6y ٰ֔x' K}E0b27gb41 {7XtW+W.)2E xԹE햊AIvkj<1<Ѣڭ y"%SL>DhDdў-!|#1N a3-dizCQ?w|z zM85 4 (7d4sx]EpG5GfVDw3+8AŐ6mfe!iiI3Dp."Mh,AcǯdZ"4䧊a%@^m!7[vP7U) 7.>|l,d=G6yPdbca)k,ZY7;s(;|q'ܙ2վҬI46=4t,J-nMGXh ,ܡiJ!*,Z]wMsKAN4PuK„sR&ͻsb"X܍:^#Q.RSRQ^m(HHX _I:`] !e7"6. u*6? "nRMġnK*$#hlDifDHMrBZ;W0l4%!cgƋ&aS9lXwC0L/ ``fozMrG۶pvnm]|wlxz"F?taQiaa`{<:1uh67^R`J.67?[kV"3%\JX%i t o JA5R@p6m QS:#y$ B@ҠbUR܆VBu nYl({E E^{|7/Lt) {r-?U+¼?.1: 9Odޡ;SX?g]VbXo G>1PVu$5Vʘ`2U TMM)iIk`bcܴbJܴBMXzj%'j>,|qV'#EVO&}  , ,Iߊfl[HF&9'%3aˡ @yt L#zhDA@,:S,물p_^.-#! e9RiεD1a59-6gp@- i`0ܑv_*HF9^ە  @aỴ}>8!9'Lr,TS$.ZHq߼_/e):$ <=_T,4W܅/aeS3N<؀ D=Ǘpe'Wl6=.„{|uܫB)L2AJf$; (<.4B.(;S8l se .(E0VR%mmQG$~ع+"EY170%{Œń1 ڍ3*.tfu{gP=? 
Z-˗u(^hY]++m#Incb4ۋ6~yǠFTXbulӶ,,Vf}WFFD_߿k1­5G =ΖVB!Z0X3m44rH@kh&R)Z lSr|Kbǭ%Q}.(hKi|CX|p{E1(`fEg, }Twotc@])pq= GP6 m1K`uJ/B9J Gh4n_boc›0sP* F3 fRML}#U iMT]E(7>Ƹrru,.u8d 9Hp҂a7mpnyz^0X|a$2ʒ/oZ%3Z.2OCaYH mJXj JQ(kE'7Y⭜!_4ǚlMOK35@ͣۻm2*L 9uajm[1hQ1P5v&x` Aih[[)'2OM~c ˲*1%O [ MTP`c4AA]:u}9'S)S\!W/_`5Eƪ@B| ,C{ ٤Ɠp&T+Ay[`j-B`׍уZ䤫, Yۆzt+v =dUu.QDayp % B9E30B:_`j9Jq-3(!I2aH ZLpK-INy[sً(A49cN#R@v,fmɢOGM$Bt-B`*VLD\(Ji TQ1!)TZț^{ADFA fQ0˦OHx"1R1/:iRrc$Ɠ0 &RI\UſC|[4;Fđ"oͼPIqa<]eܚ,2Ok $`(,˺x*&S1AZ?~Ry\Ls҈HJ0?SYy/_>lQ E B[ BwKIc$b(yhh/\dC5Y͟x\ *I9j' mE3BEk9t"}PHH;mgr#C FOQle$QǧL3h緽zsBuanfj^]ܙp`/VC_ȹjљA*r, ~ûy،z d#3"< )Ma% ©c,༧F1.QJBr$#Yd\§컱Jjs pFW 7 쒼^^{={ѷg \=TogoS7Acg(ƲT;^N P>hP_ny9R>ckLJ0Ip靫1Q8-Ot0%42úSDi^颹Ta ϔ˚H}6U7 4 Y0fY\E0+(ޖ| !ۯ5@&;L|XH 6{Y)9fYY[Th)"$?]6|b`өnq-Jc=pp8j( "0I^-?AxU>W]+{7m8W!{r[6I[}N>h-i/cJD -;Jc9T!=sbޓ"M`m${E% 'Wk#JʶOaLj t}#gIv( m70l=8%k6._\,6!jL+=b7uS V1l2BxDWSw|4s)J`ilDpL pF#AQ"D`., DQh,u"'.b:b>ZD%4PJT\x2b⇷gIYpmL ֏-8y!1J"}``[z˳yՌ#['py\ =0ggiF px> ߇t雋 XsthAgoz~`x{ F\~ sSx읱Ŋ D;$i#|]] U@XhI}û32_PR*܄*b;8]Vz0`گ?)R$`+/ XI|ROD 1pEkQ`mCH谡{d12a3FZ-5AhA` *i#Ymǘ9V(,sqL HV`p>=ooz&bY@oz?/Tp/^|Zhl1fov=x м*zqbZlS~j/,**BĀZ:1&k:.=3QwZQw𝥮Á p?BYw7罿Fχ]z{?_ F˗*?pW ֈSO^WRjͥm^M$s꒰ 9Pi sQJjIkdud;g_cgmiTkͥn: Dʕx8RCfNVZ'KEu V16 "6h&f& :pHkY}2]{c.!+IX(⳹6*_q;͂xQCMGuJOZ!òRǛQo4 ?q0p7pὙ]K<G6m Υm\VjN>6=[ńFfmb"nk;\yb; d]\y}01n?C^徨8tXjq!a{۳|BAD?>6A͢YfQ/do*/VqLSxWq֌v#쭂Dw6ᨾIUps>xw[""2FAYTնՋ+^L{k Ynw1;tA-zȣvQmAB5oiu'ٞ D8rF6R(PvD0cʚ+g%0Kd,*uŦ 2!:ɩZSVctڞHneX2JE!aB(rS# a/4lT u]/Y"/Y|>9'o _i)e*V bn`F y\4.r-ruAC4XOSr%GpdNSDb2 G{<.-ds:|6EnlIKEz~0[5`_])'… (~O_Ō|={{Jlߥ;; nws{yFc}F ?_lEE3x=,?34Vr?3^|4mA_@sPc,EoBq=%f\3um@ZIJ Xl# 64r!o@DY`1ԛ:xeYT\p3 ~^h=c GF:EhxHU;$0(bV Q[l,8I-&{+,V]ea Tea> BWQ \` $Q>*`^XaK@NP@`1yD-0x TMiJK=Z뫮eir5 P\Dv P\Q.c$-VH~WJ D&X GD#SP kV*KYjc0RƾV}ţ#jGPNCs%ۚqM&t>"cei㨖IсeG݁jGwJjQ >Qɭ{R9PG#xhST@$A`b9ްb  :@;DFA:\fnz2+@4H 7q׈1cFޓh5bF"cH{KK5CW5"7[/{Yf?v` `I[d)oqf$tkMvO"ᐧ*X5>In> U]r :g9nX3zJƒVF8I3oH #FM0Ҡ$(^RF͚Zy)#CaJαX zX#),'ߌaif)[aεm:ځrP4ZHWdi'Zj֊xaRR裙/?{4?_G=!1$!};nuC\BRؒʚP$/)EYQ$S'818QpQA#!kŠ֖C BXM:˞ű**<)bB \1 B/G@eATd wSB-;#k\Jcz`IɷpUr6\xy ʤջRK|U~3p׫psuꪎѽ~^a2F)87cjQq%tWe`KSm{_5壭 i+CWUN KDdH_}E7r!J1-'T`ڂ=%ރ6Y~SS3,н5q]Uq]Uu Zh pycgΌsR`'ю2iYc>(s lgAP&Vߡ֮bm*֮bm][; VHiCeLNk&iԍ22H%6Lß3d# 5 Wc0'j^mL i!;[4x"]#ϘYQҶ*tv^;D HQ) QGN6jܓj)z4 #SM2I.4 JX…FYf媦2ehLCz\6!7 xRu9Cq@ԣO(,<1t)1={lt =zjױ3Uu𷯇a1f=3 BVyإb#SJnvg`Ѡ#'.$3\\6RkNf]Қ  yxPEƃLxP طq#l)ш* ni nˢj2Z+%\#D!'rJQjݷx6v|n.D3>=ѩͰsB 'Y,x+TJU$U"R&UJY] l1Sx.$zaH0;bN.);-'Fk-!!Ppt\ EFWa tGt5C51y{ePC<( I;>z%$N d%1٤!Uhn :(Pq4GUDTv, y$pPLQ0Z0Zc hQkK(C5ܻ`ix8z ?evOZpZ@ۦzܲ8(nC1f"EI"RuR 3٪;<}ץ>:+.D PlRӧG/ Ç+=]ʣdQ)Y,OJg831EKJ&5Md6 ]7wq~_Ocm"}Gr5T f3Fc]̉< Tt*Ӿ ସJ P4{ E@/_(#`ESC"TMJaI> >>}o!m \+LH@zE0Q Pclj?MNomYRؙ(oLEY_>^m1 a?XM;5@L-Ęh24RhyPX#-⸳V75L#,fJH5۩N:1,gE,R&;bM%>(9GqJIk۪WD)Rn+S<Hrh(Sȶ2eQ&@ӹ1֝WmQDwRu';uh*Z(9$snCZԤ4'C+VU:|~uusixi9Lym>}櫽ysq^|ýI_gL'*Sz(1yAIiɧR|j)=$Omo#! 
^lWA:pDL uJyN @(-FCJ8#E`xktݬ;[k %g#> m5N$5>Z +b%\uwnN]/ݑGGZ}Aӑ]FTՔE.u/h&f/ğ7麝T|=)} Ok 9%zvn/h|rL_;fxLަxLR1#xz dcD8j aZFI,B8c@i ׭g_(E9@s,uٗg0JI$JԜE)jY#%#)?S\_f&D (N󍌭uД6K& ^;^Xl/hàVP O Sl?:~h"[my|u>4{u9~n/+ow..gâ iv;hSM(.wp3Ŋ̞IaZOnj:߿`"bsVg% Qhǫ9vƔD=K0- :D``(ISJ""f`0mdGjJe+m{OGχI"m|kz:y&tz֚3ҭuZ|ǒu[>(#<5'WD(Q}^WnDx"3p1Xi/i6"Ljd(Z|||-n=0QZ|DՌ_$Ktzv =HQOd[tc~2)'E _a@Bʣ;#>ZCZ!l;IhG |x2"fca9%QwZpf 'FDZxDa#OY7\{RH1SG@O 7Yϊ(o-prn== D]\g t&SN!d:j~f.gr& z.Yq6?ytfnpMҤÐ&UeMn Rh`gS4S@$մ1{vy3KONF: su T^)`n(wZiF\Hsؚ([l4-z1$!d?@ϽyH'o/>}Ky4zPaIWIxԅ LJKXÂn&eWrR8an٢j< ,拋GbFxj?Դ-B"vCc?:y,";r?\;E>~٣=nSa*GD4z`{^]Zq8AuBPR\yR'\Q&wEi*"jך, ӉZYDtt@\AuBjK+ LK$&[ru-1&x`o'ȞbYdY-ME-_-=_\R'&~)F'ŏ`vCnsˤջpCs~ݫpsuꪎ_y*rcuU^B|֙3veǻN(Y)wΘ!(8XvF$PIR.:o÷xtysun3䡇=| OiVkʫF9Sq`MІ 8bTz}&gL zr!Qݮ]}^0tHfs *\35MMlUr.ޟ@buhS){{L`+נLt^APO$M?M d49w\w?y@2"5-,u]j-DlǔT@2T0rq@(=F A7@LjBj8ԵD'DJFYoh^fc:ZZhfQhy`= n 5Hۏm-rqOn:wz@/} ϵSYcc \$\hn]LU0G#6:۷H>{w$ı 7,Z e@-;dܡk,/.|:P?y2Ulj<Ԝ) Fm'?%AC @ NH[J"`gTDʀ3  辙Kd(^Erɰ>c:V57i]2lB۟\DNeM$5DLbsp޵6v#"e{)qf6%K1nk=ߗ^ufwĪoN R_)nbWc*챣Lyk-H0I ot4Wa,8[Uh`]A kTzpSʠF(̄Q7+}Rݐi%%B\uAd0i{p;0,cɾ3PFx;!1i2&!Apt,r)qN\mGH"(ePF{2캔s o_G F1?(=h#3kH-Q'?<_pb4rtW{ hp[%9VuV- e =&P؟WLik@[EԵ\)&Աc٢$>qD!t҂|%dz)#?^j sY\c},s[raTE#gQ㲄+*W[-/\1TڹF 0.7^?OJֹ gH[}u^?Χ ]\nL4))t[b UvE2TsЊzXPmUp14k ˹7$^3_3!%CJV* MWF%884#AT\8GHIjeh{%1=)!D0!=4Ŕ)sVN%3hu*9KH6}Nizkvz0tPֈ$wd q#GA'A5/qך#ߚf1VUdž$7n3'iK)=/d/7SvC:l 2'W1fN iyyJj>WePWLPo˃ZұK6GrM O޷89mO}kc+9"Jzu.[!UBmZ~ݰ mR,р00 &]TBH)hX?%Y`PN~Pl{eW'w&]NVn6w>hbiE]Gu-iQv?E Qsa 8L0>S!# y#dXJ['?Tc}^Nv5HO5!b0GսӼs2#3nlqG-Z6l!'&sLJL9i&+l*1Z+ Œ0hes#TySUťa);n;D'`Jg`)!Z!mX%-&!E+"3x•dsIa-l"30+o{-Ad[OB @)_ IT & (6hs&N}wtܼ&Gr$ fO.NjۛbE?J]\,>;z 2ֳ`ŒamFvRY@ Lv+: CK`1_ ZVQ5,(19 KYb<~  re+WIa@"Zp.„ZlpYhr̀1|ozLE>F !!oMp_Z0 i]#s5KK'dcĴy[9LfʻiwʻI謁u[T39;mōn٬սz/2r/>tfa,Fm!uu7t9=1ـRiH5tک 7z)>4ز~V;g:ϴxwn-~ r P^Ce*rN(%؉61z1+/QfrE&hʈgR SsJcFJ*˰ƫɬ*/ApV(ۻ4PBFWL;`ېg5GI1wT3e: ma:cގuvuP8~^_t %ž,^my ReK3_b Nd,@f11 9|oamAغok"{28 ytKI kRD9JT"G 8HLh[2'jm:,&}8{|0̿x?۝07g&lnmeVE/rvaԪQB8ظwW|8eCgɮmY,̂Yk)c-JQv {܏nPla~X7"xk9vnNi9ۀli#yխ<1qukB޸J!"}ؑȡ(W2Hehgrr”jC5!6Td+'G)A\52HS-l,ʈe:T _S-r 9H=OnFݔʊ&ɑ]0]{Oפ^Cξ"Jjbkxm\@9G 7 r̼Ny`R)چ.T*QMU+r3bLHakY&i >Ȝ03H>6oNY맷•{lV=*s)ٝ~i0M:8}%H1Y;2#%e׎kKQϖ&O/!]~T5)+z>Wu>oC\NAF%{Ѱ~èGAn #3% Y߯wN;ϹDk?;~bn] ˛^|_h k}N'|_~-c!{^TκM%8 U\ "jXj7s).]R=; (%촸@Z;4`t0f<+)VG wXTRD:OYh)ʺ\E[`ox끽LPc4uVbƟ90cJ%gd(Z_88°feև,*,1o,V-j_7ޙV"@`ŘZ1 ,'Ya4 . xpWrk:rܙ߯^nHGѤ~bt>-\Sv~߾񁜇<|ER2%Ywa#"Ax2663 Mߝ[͗?+˶pA2˷'?PHIpggO{<8Hȹbs`_BV (($6ڣ@eBQ8[p7x5Jc$ 7Q5D൩:F9Ýq<{ߖL@4#KҼC}~Ǔ>F #!KBx(U||I{75F#Lx1b  V/tG1Q&81B+ ^oFi8z1 /cY5<ިŚV2^:j /K=B)S ( ? I!@B *>˦Qc'd*ӽt-QehߒQc;uEMۊ#SaGN9* c4.ԧeW؁+Fp0 p2;R/7F=t:zV4ċN1uҰ^_ v =gr ^uTPG QP2J\35R9mL f{ݭӑarwtNmf!ꘑ~.]_ġ'Q],>^I k)2*P(W9b0BS$;&l=N1CPqxe N:Ma%řQ5PqL`?C@J  AyC;V ұFVBt*bePa~:PO>;\URSI1繮 aBm O"R;@Jp^1<42J# Pw^k'Enl5Yk$&3zhe)޻vgLU^YIT5KPCd#QUㅼZRJڤl;HU {F!UoQFX9*J\{] &G2B(ڰ^qv ׳Y&O{}@-GfHrk>"\9s(د^!xz!}G<\ޜ*&?iLpp EPFe${KDJM~#),W B~8bx +1 YcSVp4FF.E9^SF v#Zc~on@bU+&\Imut|vN+?N+7;t54^G#Fx4e akJV `.J9G)B+x;3Z0faAf1Vܕx~D_/'Wu]% *rS5aMV][0?>'Onw~_XpR֣R6r&iIqig^\vL^F"8XxI3!%-vHW!]yS1bBN&mŴ?yA[U)L3S**' njʅbyʘ$",u(q̏z礢5B=Z2Q_ا/YLCÚVpyTy)s"P++E4w({g|p% ٬ځ鼍b JTGS5ˠFS§xZ tYi~g+فQ HnRsa(ΖZa֨D@ =nDAH%"btah96-z(򬧉F T(׭'Z?WAʫS1,DRN!bо@%<-K1vJ= g##r!odtXpekgӿoyVS?^flmnmp"qpٻ涍$WXyw>9NUj.'I*k#QZR%wu}g@H`%&4~zN12ؑ6M6Dضiݰ8S:Tϛ./ݺ]~ʩT`?{ S#͏לp<^ S{.o g YVR:=? }::ҟ/q!C%_p}bvWlĸ-7]b.{vMC`c-!chK`j~6NM^C6ҐrCLW`kΐ57GÏ 9htN}ulB D/-ii~/{ńwoU5rWn=^bMw{} #8ZVMl=7u*m#lZltZ(P/Xk|4aSc2ߓ'u5zm7MR8d>- ? 
UK2T37Sc(R:g#'1,mgAiA]*rt%mVEtR:P'>Dc#pF +q^ˇrpyg8<͔֧iO+4Q^|^JA<ʦ֭T{Z 'jmJt85?T΢(34GչeJ 5.7S3,ڴVy6=p e:XIS '+ 1^/JV' N<#xE!ѶGnަ%X3Tԧe?5+4F}u8}ßכ %t6?-(u/6-ϴ4RWW4kp^P%\{¾8X|رj¡x-P*}sS_Vh/ F!ꙎE[.F:[iF9Ezm$KiM֢>[l-s٢ʼf4Qira\::lX+-W91+FTegII2 $ 2\sJh Q&x 1W,y#80wCRX)587P028ý of)a`_}r3BQBDݨ#@il]ܖqj!>V{g̿zpVFU$1גa8yglh-EK[ض]?l[*\ }aihH9xgt +8ƈ{dV5mPBޔ8tR}5ݴuOK>~a7KT,$9ƒDĜIf9O"0N#@ ,Zo/b M%ĺ>lא""fZ*N8d?vNd7E?yEI{إulH%떇.9I);O5m"&lv0!^e˯"Փi s"3Ώ+4WZ0=)/*n`d? I󔻇)41 .ߘo?Nq3}|*lw wߞ]}hsQ1Nnގ(BbVsޚ(N7~zZ,̽U m(ib' 7[uSO#Tt02 7ڃ涖jd.p77NNfş !o?TԉܻkX`J )ol6[nZpGI͆ 1!s,gb18WNS&+,%WDLTEJ&Jr<k>#ˍY0Aid5%< [hbYǚ;bm)9o߮& cA}m!N >XI^yy?hd-PS$TۛA%@彈*nѴ=eXfIKy I~Y/MR8#hxNiMnV"j|qU8̸&~?1l3M|2};-2o̞6k @ɗᥢ` cG+A9h:ZpJoNii zRv ZM{4בڃ3#DĮپkEr Z |0Zۄ9U3!j9wWll6ԇNQ}pƧIi L[[Rkk1ˌmm9 Q5bwA@$o2̕{듑ΌW`hTJ j GÏ) $yN=qt7QXX*lE?7=MO>&\oaA4vY(0k~ݗ^l<GZb">5y{a^7>t"RDp8C@巫jS(Tk,䕛h'IġwZݺbb:]Ļx$48w|X+7N6t;<MSn1p2h۵ 9w뜺"_Ztɸ)utQ 4^i Es (*J0~f`K35Rv8ld ;{pqDgX/ 5+2cņ,eܥU~^oThn/$}]߬m3$3O|oV&txY\k~KN}of't7n^7O<\/|'׺϶y{N$xQE@TQ*.kw^慤6ji ̽xq YncB&㔧嵕7,,n_ ܘozKw+0`qE9ئ $s"8"cip9F( ]i#~)wWS$(ff7&$z778>>ޚ!\Ԓ#ۓoBkh]RQ}x?5WRg  s/bU3HpYơI2',ýTDɗ7}򦚛mb̗FV"ҔnZqEFp"QRp᫯w7=._-Ʊn[TN5oQJ5r$*|+?/W[|jkj8KG݁ CP y\tcQcf}ugpƈzDR c ;%j)@aEs R3t+P$ Ye(XDCJuI4D"H š寃y|vPj6] ȁ wt6l6__.Ulާ*]SE]S}4𬃹g#MGzozvU:s6'Gׄ荝j @C#O~M2:ܫuttS&G&$2рbPlN֊ ="?dnˣ }YZnN0޵5q#˞ȸ_T'[Jmsyٔ `,&i )qxùp(ѥei| 2. ׼J |r3I'SwIv*UG]Rf[kM%xJa1cYd0Ҙb|b9~^ .Cjri[tPw*Clg[RNjɡ=8 VΉ)P AhݐvxuC/HjdC?V@bSjB[La"Aab4L M$UqZ)۬ñ%[CE@pL9 WdWE-NBW3nL\ec%Z)BI*T1 D"64l'E\rLPCBW3C#N BXV Z4PvN7wƟ1cpvlV"O22ע,I0e2UnI~Jf g  ef\' G)lt}ȕ|cūWLJ"(=Ӝ83PS+aJ ,CRyjSBf> 4 U wrk;YIz帆40vЭ_v- 8)c'j n-?ӮB0(X!ډ`4@Zs!yOfO4 w;S&,]on¾P;\ FDsxXlToy~jJxRpC֨O7 LzD]cԅWBxB\KVȒSb>R?y*@@ in3qsλu7dS"Us3B/?9G`H0It}K'Ұ QZ v3v9S?%$0ӎr%OHBmឤpرiػHCQl!<{{]~J8HAtb5R`8|1psF/+VZ3D Nq,ZN>9Ǩ웳Cw(toY.355]lR B@]#G8 k=SheesbVV{90m;)55>fePlPu$7|[7TRtVf#L#]g2oL"xweH,!+Fjκ W.5VGp&̱D%, 81:+=Ufz! ]΀&<"`ܩ/d}6dE[(qY)Sy%s ;$=\Hr/伡27ܚɟ f*'b:& u Q(fiN.@^;lI0V4}"# /)ְjBxSשG^pmi}x -us{BT)Rj8jь~HzwhȲ6#$֝5O#YIN_w0} $VӞI%f-l6HIy/<ݐj*QЋ&nlbLޚWA%Cd@D%a^qM-MF}|KW?fW֫B#؅<" qRa=HXv b{ W¨oGX CxJ8n-pc%h^<z䌠B&K%h}GR rNS+w&nV w҆Gf4$j#XXY@*"N~{jIJ,^ߒ94 (O4ʓ7ʂ:5rh[ՄX J<'|9naN Nfh3-``~<ý3^m>RC 3cEߜV(#|pxf*gf}7txGe1#Pw):a>5¤罫F$Umw1=̐D)was-SCx-K{v8: ޜcnVUD>}ڱ)kѾ)T::';EDV-)wh$iN'U!T)e*tRJ֡tgǘ=}w7\n'~? oZNjb+SʑUـf'e>(܃n/eLDrD6  }sۈ7%趏(Ov(!>&>mB/韮1J0i(PZFVr7p;HmxKtHSdc$Z\R"eoP@eX-w]۟^8Z*9.[nU2BGPc}ɄZfJRȷ nRc}I-Θv0Fpwt8V2bMvQ<*oGy\"cPAR[y71%(jbiN"ifX1" ~OAg :?GÏ~$ շ ]!HK:,`I~~&zst2 "Κ?8ol۴g4X`nKW!uN+,B# m/FO|6S{= l}˽@\0f6&L}6-M+MB+f+wLX:T]BΫT1 d%>*#^R4CX"t\d}>_VC6N.] B'ɀ!>|6kFs$/&<^{˱@Sp7sɘn1{g߿=j(|Y8x#Nf-m88s΃ÌK3t*i^q.ŒyͨHZRmF;ϾС6bC-Uc"d0AFqEq\I3F4ﵱf[0e>2u20o YXT|fRiy>L4& . 
asOiNRbj\jBSrVj@CX!P%erWIB^p6mn=E!f%۝ts4SR7t<8mQKؑ:7y(P` i`@ݟ\E1G#ڰJ"8o >:ghK\& D"Qa0(nxl1f1icd y_W@>1Fj| 51,ٶQy4P^E]jB\:.)Vr|Y"qN_kiAWg cƭ !'ﰔ qi9Tc#ӎq2;X!uo >WUй}:c|>z^WUhQ;bK+3/8M3RF9m0ϨɬQEJҔ"t,3gu'uXwK`~󶻄T*> o9̔­$/\I83> wkPFAá U Ysvk{ Hȃ^FWS,(=2iZW!M*i]4bV2(: :JQOE$:S|# b:5=u'etN0St1!0Zu53\EHysM~p7οJl 9?\7fz'3~u3L_ͧ~8ݫk?A-wL-'}I|FR R*a9gxq<T*RΌ,ETfNUJ5Ui2|IULBbrbb |iJ)Ϭ8SZJX^FHåLT.E[@׃OɑuDnb`ucR.g۔yAv+0 eŘ)AIPDZ{^ 5&oA #Rc0G%Y TXP0dd?i)vAԥE13TC& cXٖ%ta"%73J}jVKci^W"pN“)Ɉh!t$ewRr٫?Փ 6kn=9iaL3`rH}ړ ~>4P*MP L P5x +T^4x,I 8Uq{ԂCAPP6#7òN(vm^:_@;".zPI~Gٷ /-"PϸdKxsn->8-i[M6t\L6>0Gn_[2qmLh+zZ{{1x Ŀu휃h蚅]:u؄{ϊhn4ݹ<&o~ߚpUvZ}{Q;i)j>L (^C옴I{!"(^xfn~A⬩5 @zj#KB/Y,S0p<ěKYҒbjRRlJn^ ݧssNqeT: e%`䆡 Q:#Р6,2=hܤѭ`珶q=$.ȗ̋S$aM1d@X fB3bF@<baPFFTj6zZNEʁˤ FlQ$dF 0z=r4 BD意ѢXSkrG&AZ$M &f*S{iQ^ 62ZٓA$S,_28ɥZs.0dK+5&p_i%FΧMߞ ,gwg;BLZ^)JB[΃c[O@z!*U=]*a bnQSLi5gFཷuBrl2E2[df<&cF1wY?e{S ǧkOvyiK@Rk%Ӆ{kP8EKBr "Ì̊'_TEN)x I(Nq9GJ[/!Nx6W|I&jAE}rzP-U'7]ym焖!)Z2Ody5xi!,t@ O(RHHJG4yϳN) Lii3Yln?+ɡp;h#3( DX[JTjlMy?;US7;BJn[Q@B\IZtk[am)Kf]$*f^zWNu+ZѠ$@+bv ,7#kGiP`vVloQ;&$$סI؂bqoM5 >z=PiD.P0϶R PIR8ԧCY$9qԦ4{†mv9E$;٫yn=MyC8(e[uQ+qQ#"èYĆѨRb4:| OHU#( @OJ.a6ݔLg1.^ߝj-Q];&MQݦ)t+CGQjNKyrL?ySJ2cԛYp[;ediS>= zovDRަ^TCU=U'2IF2r#B gˆV7iHmŖLDwc u@SX8{p$5f2/?yr=j++U1R;>~I_\,w~C՗Q,h]]{1FC0; , :y9O@XYv]KPIR{G E'¯ֹ!j#3Jj r az beޱȰ(-`ЭohowG|bV74;^_ WW -8zFXiXa2ޓH/MD>R}{ŠMHFc#[^5r$x\`}נ~ήPwA>5G%} Ay5^[r6دs_iZ~PՀv`T_6Q} B q`_nХdhYw^'$S gLt?T-vn ڢQSRj=9q.V."bgr\nq* ٔ'.#/2\o>W ԫ 'etMwW7?G$V uW} @ֲ&س|9\> (6C^Vd@>G?ɲȏяќrib/[ޗ oؤٕ6bgˋ9iq2vo{0cqXw;P=2F,cFRP$Eϥt`򏲞oؿLוƧ>Ce'6'͋)LzsZow{?|M>xKw `Fm6ݫXޙh,d6 Γ7b* Fϴ#S:`87 ZJJ~Rh DK 4l!-TXBD*L"8RpB<s :eE9 dK @WØV:;e)ɩL-FD&J\9rR`,h&O_i(R>` )HHs>hz8la&A0{)bѺ֘Ph0sBԳ#ꁯKf*3! F0IR'1#$ H3f@⣍̷ƻR3~LzC6$C!Cȴ'ey׶1ġ*"ڄ[^޶f5c:m~hl߮OV@oyQCcGËg>ܠt{"-=>[ڏgRkPo_t{3J<㨅[  4#LmII&4gIYAԚM0U\OL)TA)ls,j^&^9?O a&% K'VzQ{'~,`!Νh8z"@$r2f c>*UvU_Fak^]Y'"ShR2RI5r@/2G? x)y!pr^Fޫ}qUƨ%磥M 4QY:uZ208c 2_Ȩi-,G7Y!堣;ֲֳT⍬&ƛZxdK>icϑb{L@ltl4[Ÿp b7NF=^[i3cJTÝ֊~CZԯFEa"#|*PSdϝ65[rO[Bh"3ʁˤ ;RlQ$dF 0z|z'J&;5`+IkV`S1V9Yk+ fF!f!&y0СVTq@`G] 1ȏߓ\[هHgY!nN__Jp0c3x_(g@AZڴÂd5z/\S{",6[^o9tMCqw?.-Q|=m{ ㋻9osʘNȑkxo[7owjԉ p'K+lyi2Oa@tKMtxYKj낻իviMV-[1T]"6zbk++.;מyVmdf>~ ОwRO:fWwCkz1"8:0cy 3+V8!hR~CaGU~ ა>jaó@\V_p;q|z:y]{q^Z pħTږ YP(U֧sG w3滀@6w 'J 7h+ee4nJJC*H!cbkyL!jQ~Hv'#(3e 6L?2,ВL<{$?ON?28Q>)*C.‹JTL1p."I!|>Y]8 nh_^գϋMo>_={œ䖏5!mٜ,mj}/?U!kZ'K\߯HExL9Qʇymc|U9i帄U3O.di8?Zq]uOnLOt'N7t squ~in N诧}# Y?{WH/2406 ld?dduI濧(v˒l{ˣQŧ"Y礊!U.O*ӥx#ütg>P i S-)/K6֨أvLPAjvM-oВ`zkk1pjeL\"Wp1;BbT9"b+؟RQZ󝋔"= o"ćG]nTz]1^ȁ]buuMOڤIIjzn-FKhIeqjȎѪt\*Eҧڥ-J^gj7Cb @;EA H T AQzd(4Z$ iZ9oLBH@`89.8lZ \8ziHEDs*LA &1d-܀!R\ZE4˓N46E^)S[(_ B%{%fС/s5A)ӬG*nSUҋ$^] A :Zt)qOF qHZYējYU4ՙע( Kf:_ٮ_ |t|xYg^&u_~lcvhD=|;J50v>Yӷ>1C.% W,JX}fgfa@J0tBfޱUT4Nyr2DȨkh'pAQ#OfNT5Ƭ\t}]#F9x ps9Nj|yp\A=cnrκu /F[;4ʹ>!JJGk=BK[-$[nUH~)E_r950+=20TpJh'N $G1Mc=C@ e`}.R R[]`"'L4JS hH*c1Bܸd}q薤n'~KW҅2gr}3ؔJ(i3Dw$bR+ޝ)eqƾ L6MS=|<ʈvHs釕_2^v]Q|,]i8ęBiT^APPҲt$dW3&W3MI*_Q]jSh 'eLY6B/rQ);?E 4vǻGOKܳ#TV[i@&?x"DhG $4s~?)o"E.g.<5b 'і.-]F[tY|v9ϳQZs\qUxePpI%J8("3:( ؟O+/ȷmF/|ZLb.zyXDr*/md9>Ňd74Ejо VJ+}t  ;9a,'8`|o>8Imri]>?\=j_`c:fk iXXb& A=ϤIx9d:T`:Ô8BQ Y`r-u<,rV;b\8'& KN K,wI8u1r-Ju{gy\.1<*@2NՏlxXl8}wK7l_[\L׳g!!wq/. 
SOYm Om<%$yf% T(C!rq̙Υ&T?-G6[TDsyߎa,ۿ ;ę1qfxe,E2^/YSZsfڲ3[௷;N- ZcF MT0.̮pVWTQ{ZjPeGXkm9ARֆ').;g2SҚ(G]rEz|%鐧oفwm[mS9q䫩bj׏kɯZoQ`RZ+1Z%_Y!yF׭{@W3Pz NWT!ª˷*v i+ۀZ1+&|fקl b's?J}(:8v 6-#3 uv3DA=B}N13GNNp`/"& L2Q%[Y.'(a)c!9XVY!(a6h -sZPP -]tUrFRCB%Hsy3ƣ9f%*,/9VXQ [8sG2rF$ 6OI SjGU\d՗LjH$+фVG\ Ю I 9蟴sV;j9!{"R4ѴRSl6DЖ5ު0,^ ?\b{jS^V4$kXwNQK2O j[0xWOJ㥓q_笡T+=FWN3$Yv޸Ss|BWuXU%S e-]/VڱN'[ 4raH:#ܡ@@M$cFjZ+BB}P oWLsQ8›dꍹet]^FwyYve&oxϥAiA:Dc5S%>Lp%#_b>` .&'ߺ;iɧC΄>aIm8\B-B̖Wo%_nWv"c}yVGi8u+T˫gzo^}m&g :b˝OZSp 8RU 3u}AxCB9P7AQP_X d ;Հ*Ƥ96TaeDYqT@+9 TQB)S & 8&sOOERk8JB!gh8l"DWSmCZ sY48rC|޺9"ȸK/ƣoZOz%[!?=~] c5$2n @VLgRˍCg)d?B.V߹Jm@GɆP.DI͇?crj MAO}Dq3Ҁ!Svh7'$4]f$I#ú'+f"Q8pI!]\s9-e1f `bH*0 '⎅<_ 0Id8(F; :E˶odQ*eYjNO4#0&v:=%c&MpP0ZH00ō]=ß^a +v(Ȟn|&zm9Pt{[lDw>/"8b}|c (qeYfUÅ%!^t&g*F5bnT+5)& cV,>6iig]Ejn#5IIqjb[-LJ"=Ϯ 7z̪ i*{{7RuoY][;M{QWKtNti9^A U-HEUo;UC BwNq> U]X)ُ" :85u|M҅"T]?)ݮ,٧HbR2CJ+Xݖ3:"9vƃyS[x t(kn3 "ԇwemH0;C wГ힘؝Þy  ]Ԑ=(*^bQZpeTH|Hdy WWL BA@ƽ.cĸp!Mz!Bh/놢N^Oa^Jb`"Y1-xIQlf#1?Y!"7  ;e*F^ C3#bmif&ht\L1N-SWZ pkEKT bG&?sxV7P`R * Ȁ5igtzk :"p#NgLSJb yck*%}!@, nI ҝnHA0Y}϶rqJAt9L^:H rJ^bIP /Ib TR}`Xx~dr%}_~9 1<8dVȖ${'ݱC_>&Ϗ⏃4?cx2[ι)g*p]C:{ DhWWyW}?S8y7d|Pq)kȥhP?WSR1Glͩ 3=keo^gLY=D]XSMU%Ahk8M:6kӭd<7ԻR9zu[gz瘴hR1v{}2$UN=ߡSأ, _UÄrtI/'Naq$L(T ;goM2`7NT&zY/#~vonCuݙJ)T3A@0E +q>Մan4E Y%w!i TM} ̭,?ϭ 13&jqs>7"a&@:<< 16x~YqkYd+٠în2?ȂT} *u>}>0AkĘBS,`s+$.pXQH.'ėB]7zBٟ0"(gĦ|_X(kser>L=xiAY(@mcgVTW]<ǙcD?/Ϧi/ O]\my6RA4ClQMߓ,L]O% X}%WkrتU"vy8ϷUލ5A]s\R1BxЋozv5eWLKC^>'LŸEa^q!C) f cމ1!E)y(j3KD/1j_[T/MϏf)hSlPRbƑ>#2v h3K~ _-siYD˭Wuw|3wm-C.IWvR" 螎>Wot)EM7mj&HK \PVw7,u7~}8vPͨ)!~@VH CwhL kR֑0igyR;9+{t)K/R"pńE1%6Jm(QE*vcFՌ(!Q2 $,caPژf.& yrݘSJƜ:bK$N#2L;T25ˣOl'wNwVP[Z_ /60 ʿfmc++]@`"naX7{a5~7 ,P5ed{xFd/ٳ087l?O6P xisED,ϤT J>) "թM&|r,ʻ.ĘlMj0GXBYˌuohFV)RJ1ˡSC_óKe)Ux29p cCX-qlVjZVZvIq,XO[j_P) N`-g I/y&{! PBRbe:U;*.5k|'vUأm6=[n|u&&8K*h8d254@XHjUl5Znq$h"iV PQ+dx^,O,vQ 4wcEW?GvN_gڸAftt97GYR%Cf#3φJK7Ts+jQV;4f6NXJՊ gIJ3iB# jKʴZ@rjl j\v{ ׸bj(HHFA'ѩ+Q{#V}:xcX^~M^;gi'#>$:wE'B ʮXE.xP}^դ0AɝDŽ6g*M 3NF?Ep9(~-rvOg܋bqtZx*iʊnulYb &:1NÜćl<2yDOt&Uq2lϴB ynYTBkV%{@PWjOgȱ 9, c&` #ZTCWG5/xaV,.Q Jy*+fكY5( n=] 6DZLOq*$cR, u%qBvOs4_gQ+m6n(q/g4,7O=C5?.tTR<tӅ@/Qy:.ss+7):d6/9] #c 9Yx@3S6괮kܡ@4{]ޮ=?1X79ހ,j)p}~vv޴=Z -5}KB\.{m#Y8dpe0KǍQJ[l p͐\ tSាo&c Vz}ȃXRY(IR[{E9/Խ/ĺb,?n.aG]z C_F|\A#` ?/\Xf(Qo%~3M #@`*3Lbo <sku|ɑF]>wF@׃UClV= 忭wnNxd?_h!fӿrc^#EDhA)|_iL qP N)N#>ϼt M+%9re2XUV, n[<@"Bnt\).9*Ճbă^\\}ӳ(Z }Q ֫)k|t&_ &%'T=) 39eFk<L2>q'mFEn|1dqdv/0MXHWln73Ll.>EdS]n1̗LÇJ {2Ob_VwT5a5;&V+,\ \ka3! 
1BiӔJskX1i E,P O#E5ZL'")!$ sAF)Ⱀ?ʏT VQ$D0[a}dEq(@HUO*'mJ!;fV0e !_f!.b%$$URK.91[*fvG#C HJmev[srA9zUć]3;S,'@C4{wns[ 'jƟ8vQJZ2>Yեϝ ;LpjV-|2[̞/cBLB{=JgaOj'a k26 (`Em #Ė:,I ㅜ¦P Wt9E֭7an7qhѵ]h\ Zn?\eSҺjbcP|u5U~ڒ 6.mKgv;@t{~wq$qDt >WL+ooW8{P7pGe>BpG]WÎjO*)3&K,Gˣֽk9* t.K5yP od=:ˬ:3 f6N'b,g߹n}QOEOn)jQh+i X.v4Zz_y^D@LJ@eqwϜr45JkFs(BP°3E}U3 Th͈ͅeIaj3oȦ BTܱ,VV)lHFʵ%+<hdtPj$=[b)t܋,@{oف˚_yiѭn5y5.3QF{0l>O2pnj Z];3_{.;3*{(}1).^L$}_nۀKw{x׼?zj @yZj3/w"p%EhI K70e'3avjMB5u-@ۄD.^Eou6$F}6 `4Yѯq@Qj:5(A1AFT%E+⤿Ѕ˥e`'lAZN#Ғ;(7@/C1FSlSs1b,4#\3{EC[{]_|l.]o=w%b|zӚz|[j1 =ШFZBez BeSd_21*zh=&6mHu-E`c醸ViM] CaTxaUIw"HRzк''qSq=qu[w}ӈ^PQM zEAwe9Opj}SHLX &lʤ"D"R7]QJְlq6R#ҁ~³JqD1yNf6ar*jcvOzrr*nF:YǴSYAcbrC0O^ˊX S,A1VB䈈RTHjSb\̓SfeԹbΜR 1BIֲne\j`bt \s\@Gw'D|W"eT~e* )erxuA>p]No,G8.#2Bqe23_TqW(.ɈU^ȊE|3]\T|\|)krV2eVRͅvK .ų#Ī[(xp|͵ շ00PX_R.|^mR&,&j/ AwK\G`X3}4%\x,yw%"m` ~5XP[ThcJ}j}:S}qHX!:$bKǃP~t; 9%T֒ԸٗS$}/5cCw8V<1~bZ ^YĆ,`{ )1.muw9ỌR!)NB| &]\btEBQI4bkG2Jaؚ8ν۠.no(IXi_^\\3)+،q\Nbvɮm6uoF1R # xɥˤ`]+e<ò!-#("J9c+@ q2`se)N"qDhy.Bg K0ΏBZxfG!?h^}c8P/DKX 1y\>{AڨyP\S9j駤c˃}@njղϐ]o֢0.K[$IjAP'Re₵hXJ~\߅@۫0- 1:yzz_7Vb-olx)d=Mv@ºu&` /ILJ勪_=WB/uCr(B-1w o#qr&E.R|ݷI+1Z[z 7wްޕ b`eтjdimєp^DRubsk%>&S朩`U±ZQ1 pXdV,%~U||BcR1S3JGOaO /]Ǵ<'/C1J ùKp+FB# ¹eRǗq=V^fnv䁠u[ + Ce4w7?֭W`]ek6h|KBLp<.F[S5.mu@7IP!l k|Z=ֵ\u QI>MjНydT8=8={~n(y&>Q_.2HFpj.gϤ]gGsɨ('v ڍd @#]6ݱX\TЎZMYY8U jdjمM /馓h$т?]( >Hu'҄\x|'ZQ"K,zˎI{&9:\X/Etۉo 5C8&<4WIΑJXjeb8qIUN2J0JS+f6fCb^ш % qwu_< ƋA #Qll֩.t+-F(Ɩ]@Vb<67zȹ{_WϗV[]*5U/u1_mŋ+]m`B=M1Qoc( sdR,%"ei;9l+"dKc;ϫ*t8\yg8E/" ,JSf$Cj.iQoe4 &hV$m_xQK 0! m: g80<H&PԒQ-0:"g(jᩓD(NV)k,(;_H-x88͈:5R !;6QY.WjVe.`?|׺]L.=B!غVSGhITFLUip샐aC3" g`hؐ^-4mbWd~ɦ(Y97z/ý3sEZ U a PJ;(1*(X˕wS3|3xs|fV̅QmbVh3njz6f$}&cvfVŚI˖(֕y+3b>ϖ Z%d\i%sͰ A֪Ax(F|nlo%oȦWRe}3őG ~cz^=CBה*>JLHR^S~pT@rZ>MmY3|*)H ͶmT'@LnZ?,k<8Y)BY YҼ3HV(+-)E=#Ż7ol*X.b\4=p2/QV<|2.r?WQf`"Y+~)e_뵳;3u9 GŰN$}_nۀ,dOw @Y ;G;GaJ&e4H$sc0xdO:WM]Hh{4inZtI# |r+F=kc zV `PO۰rc3) ͐I!簦A.׻6:+--l'=oeV0ܛɇ fvxU9.+q@:^޵cb V./$`{:lL@QT6c-Y)DyE<`Xջc\̅ J#>Cy#pNwpd]pU1Y7IlP eRb%B2k"4@\jaZI aH]h :7ߗ9gfYnr4ˌV, :ZHrC%44W@jnRn_KO j\[:?%T'cl1df2BO A ( -Ef9d_r7ɣ=~ 2^Ĭ\D`A[#mNhm9ˆI~2_pqsN;0H®X*Tb"IRmx9qG*NXa.~&G7#.qEo*#ŧYLKᾷM Ԗm+ya@t`2AE$&T>m' ףZG}:  H#''KP8ByC DE@W mù"s8_疔tb*284iȇF38˜J!U1 Han$Pa#_hYeggJyNwg胂28C~*KDJj቏9ԅb(97* :hD)Kj dBaHDDIIC$nsv4{WwV ?@h$l`y4X @aw\7F@ P.22R;# P!q(oeI-KtCBA!F5F2\9SZ+yfRFMBK<1dc)$W_zFnHܺqOXJ?8v)Ri+hp9j'b .vXm7" YV,һj_W[3WMfP=נ  ٪b5^c>ԇ1` e]𺟣4K^iwp>ËmE= U>HYm oe˃Vb',,?.|-k6*8=Oֈ]//ǂT1}ZEh}x~UW{e`"r *?؜U2WzqJq%0Od[ؕd_9b\Iq}g洰{[c+>{sʁĵkuD[:\D8.JDBv;? Au];wt'?$Ǵ=U@p  JjF&z>TĶn/kА\E/)dO*ʃ ?_+ ` ^={-v"&kb'ҸV7ڪ0C{݈4ynmFO|W-O/vݽ%$NYEY[:>r+W'+tGLI +{RV/}Q*^ /mN/WWhO78 Y{?'{ҩB$[TI7pfSWI a&%(șd. [sCփY)1@f,t؀#CDZλ\TIa5v'FHa粨LZʨIVy"8ʓ* XDgo{&s)%F^)E 7 ֨qXj3tR îxyqpZæw_Z7`])Xf⊍R-{ϸ!JdWF 6j BpafdTl?)I(̋Ҫv} *@ z$ߵ㐥8 )ڂwԮz$KgHncuI| Dk>8'XK[ۇǚMsis8>88U0?%3[ʛ/=ҳ7 ~?ʼ+,UA?x~3c!?cec^_)!)ҖQC1mGR@铏^Y{ \uK rL >8c%uI!~>3`PCNo7PldO4E}aj3+"V{ :]ҠK!:9&w^asqm:`׈B I_)<,f[KףPʠ찥ySʃ:'mu5oBuj麷~7j2ڀ6-zF[:;'\k0| _m>to:O>V&֧VwvPM|{f=GR-[% 3^I)Z%wf\.7;&D!d5Ts9a "Ű5C(Qd *aXYZu3SJtoŇwC$=\5sV|8iГ=Q^~썠8KrEkE7cQQ0dEG GwGN1!uS@ΎqÇεD vo[$0 L"~tMPsF>r3^м8<<ij5xhN SV1u KuȺ]mqEw-E[ :*6uG>{o]ѧmz[LRUjI}ŀ}𽈎-k7Ɏ"? Jҟ?|Ϗ v A+$E O^CB)HGX< gŎu&\nNܗUi*결,-n g1y_ee~l#X<=<^XEɮN_VdDž&V"0a=x I][ . 
r0b&~w1{Fj~žnZM\N"`@ T5s?&-Hp@ /ct waCEשWg?!ɻ 0#AH L(BU8m@@CUD#zWXj U8sLi"RXJg2VӮP[ )>>_|-R^xwȹ(펠HI7 <D5(0[5|po*aq*9A19XV͉l\~H)mg6qe-U3Jtl}DKYY-%$TASHijܫ"2P9$9DfL=Hbd@{xq4ٱdwAnNOrk6k 2QbF 1L_8O%I  R EET^/zN ^$U)Bä"K7K6;G7ub(1, za˄lozD0 뾴"Pk\/ "<ଣ]Bs y"V6_"0BbpYD7qGWeh+& wM)o;Zb7/ၓ#?FDϡ9,rְҭo(s]`I){| d_vì.:ȼ>\h«*Nb$5-O꿸@ۭ=8{p~;ٻ6ndWX|32Ualmmr슓KTX(ѦOcHJ!)b83)AFKF'!X_vL˕qOh8;LjGT0 S9c9r>̊dk3M:,{1*im~}Fy+!X!U{?oߜ/%c k-W/XKC^u+7Tb)aS_l]WPg) €! zD\0gVZZsA.DFz` R`2krў A 'LC$G15d;&i#lOhvIo-X\:$F;+ P|΄,sFh`>oos@ J'kw4ư*<*9u(QL"(s䉢Ve !p0Xy!8= BlؚfU p.=^Wq5_2Oilޯ^pA D\[}>,=0+^vꋏ~݀Qfn.mpV6.O?ژK>|f§8+~?lRݤ~v  < F! on.>LS`= +}KqREo4։f.0V C`|/ N7LC'0eLSk3G,>]NNxQJTZ+GLJR.;?^ P='О~ÌnCUf{,ʘyʉ4?fv>E7o$vHZ#bl[% Rơ 砕{?6 m$ð,? C.t3qפ澓<}X[/zHa|h Hz 6޾rå|K{A)WּUoL{} ahy\Z{{ -8&iԲ|ûZ? \W*_[ "Vd'?}t.n'9>lpѝ`_d9@NۼEmzn4ARpҭ;pob\玾Ϋ}t3&R&^]^z5rp@^:sk,F <Æz,9EsEs&sB7PJdLwb?궗P#ꩧ4,P0LJcGExO:iDS!Vp4Sрj8(ӺQk+.U*\'v^߿穉ňEKl )r $XaA:Jaijîb7*rjU2),xQ)GEI}=ٱu(B1b8sht1Qр7sow{?Z+=24yw _4cAyy|y~gů{-O5%kɏ]q/kG]`Vj/,`.vBzc'?/rqS֒rm$SPڍRjN1h]%BڭDS[ELq{oqnj7e[S RDu)O<Ѵ[㉦j6$䕋2ZǥjKzNDqz;&Jvzw]-īPg3}37ˡDB'r3(?0]ՍA9jǽA˹rUSJigy{)ǀ13F<3Ȍr/5J o޼#&1q/qV,ցhj% Wc`F7DO#@K厘fǥrYtNua/RJ іd{7kR0c祰QgH'{T,~#|f6}w'&[''gK5SJGdS{RVXR_9:ia 'oePζp)ŤF(Հ>}c9tN8&ݤ7dj.oޜkLUn2*r0d@e.8rh$AbWUJq)[E'|Υ%qf/Q}Bk-$ozY!HOj$]J>mwg` ix礔|9LRZ_?mj%sEMK&{,"HʃB+A>6/xpcڳЬ|-^ %=ifoiW!$"9FǎG`ey\l& ɢn_Uf&CBΔ efk>Yͣm<À tmAgZ+L8hUò0{ rQ,dJ-D{[m.~&SX`m`J`5ZL)o[XcArr "#AC=4~h3x|wiGvqEI 5/߭תfZӧT4~N`-p OPp#l:CֻB&8H"D˲~uIjLҼLJIajrԤT1ы T8E8HU84^項\D є&r t6iF5[EcVbR*|881 ĉJit ČD%bRLlT-nFMlߚm FQ[BiDK$ѥS4X"/rJZ,U2hR3c-'HH4,"`QAtpk \{^8יI̱*Z8ɝTkSJH:#L2_:A`m\j@&0i`!RD@mjiAz@gڤ\ݾO -/X@;1Big ׸e~*1b߳kc"fìCW*1x'y6&E2(Ό?w``5T{iI{$Ve <6 3^!eJ5q)IΩUQ61  fφ\Rwlᗃpޗ.Ԡ_ưCi4ۚkDrII-PW?h$̛y 3h5d x՜ݤ%vedqd:Lc||abhjڏ?תSp:~R=Ry; >W5U-'xm=l,]E9K+qN ! lA1#{C-UD+- M C]% lx0KZAnoM -L )wHO D"d53JqV+1`^9VWrN}F$ӻYݾ-~nA4@Ƿߍon@l;e{s_Vi )$_3m@qcu"f4T"AMxɄD^Xo?`6}DPK%oxPw GVhurV4^p8FɕP[뀕.7"_'>n<1p_fBy39jxnz!dw4Z/EUb^Bk6zg@}>z7U7`Uƻi7:wc3Fv:>k_L2z톬}ڣgx5 ~o gn7OAx3?܈^q8o6\ߔh"J)IB>>}9oCt@Lo=qWb&V^DfLTlrNg>z B︺8,uht^b{wK[ `3g2 vطx ̏G۹M^Ljzj+GUvE(١\VORETG,Cep+V}= 5]qa6saZvXUi!)Zfñ)yjjfb1m 1Tkr{` R`2krў $8hc1hcjN1ւ!A1J tMr0j\zc)8VD'ktA|[4؋t*c?.__hP("8ޢ#i04ݟd:*ꐫC:d7fR4@s3FrAbYl w~ VQ?|,.їEc"M:|,"( ج~#ObW+ƃ߫U}OE>ӜJBFȰ 61Q$km$GŨb4S vvw03ɬvrt/OPTZI13nt#M2 >j'^9,ڊZz yl3SqK;j,VZh}ܴK2v2S F)Χ]E4"f#NmgQE0բh!fxQ? ~i Vfe?v-JLt#i\1# JX:F!G+(HuCCx$~^_ tŘԬtsP9i+`#G F-9KʆwD. 
*eߌ,&'73S6A:/ڣh<ӞRV nMHK˒hRpźvz6j@2RKwC7\+cEWc^RXk'YXY -D5mI|Jko84zGEɨ'9wyͅZǷ\-nz .^vF-CmݱViq( ;ǰ nĹ pNsNq^(G0X-F$?YxavwʕyJHS=Z3v;EkzUkj<2s25dZ*L8|5^s-*w-4?5y_kf^dkSSi/FscI0:^ɾvZݖ flkw2jrFdȃjJbM ީG=<L9ڂ#<~;O:7TO>qO7u>>[,]pnwa M[ڸv-$>yv2/V(y*1(ؚa7Vk_3gc8"LQi>̱ka'w(pmE,n0{QA{QU3&̬0 B7DŤQȩ<5 Rw3V%rŧ˛dǭ#H<YǷsO2>޳AuAz7d>ZgF'Bv Ydrz f>0eP'2&'ygP$N6nwx \D.p]mԻK.|DvРURL7:84:@2꽴"{iX#؄҇nnӳpM9+ؖc,uĩ5s#8#(jt:4ZҵC)2y[V)gU\2#0 %Uz3,24WQRav;*T,ESAlP2&i  |s.@dzў$MEȨhTi-Qli~dBמTFQFN aS`[t%iy~te@]Kn){mLS޺|(]JI;-m c+QE9a_=gLLl1j`C2lQIV lcQmR#\A:i< Dʳ@@l94)ú(A3 R9A~"OKbtxM=P5]L9F[T=fA5ҕ>M VS,_o᪺y6ݡ'&4j+h\t==aA5Og/jYLDp@yz;w#t}, p t͐G|ftVq)Z +n=>LukUA5v#fۛd#ɜ,܎-bऋb_T?_793)G2+k 㴂f(ڜͩ}|6Sp X:94i"fQ֋8nXͭ Z=% dxZwpc.vU}FMMC,>N ֲJm(J JV 0.xCO^T/+}uRv͔iS~Q~/V%YpGGnI'%{4Y_؟[vsqUTԏm/O/s\#<\ L槮FP>ܑ/}g_DϾ}=@/< YP DWyU!D2HuȌCxv|.1R߼J)7XkcDl4}`{H$\-o]33[/˾e\aP15g-Re߳WjҨRi E_$z!#ӤyQf@pqKwuo6XNk2/([‡8k!yv̪%mzWD Y,Oӣ^fg&!sNrP˙e*/rrp/6_YbmOcfo[ =ZQALp-|#s+3 Xb`8I'_#- M;Z;F7ZkvѤwnvoPfZNHH>"vͶ}1 lƤM^}=z~_-{`vꁃ8P}uui2h.?>CrR ߼N L1ѨpV&~ܒ LHfPI?s\UCivVUZ;&gHh{k}^nelD _W7~gAghHLԮ Q2PR c0P@m5)HAk'ݧj7FA~؁#!&318Hd`T o$Ӂ~ַDxs!2P4KY6ΕСj,%FcMz|FA^FJ4$J/QyrN&pcv<./6'T&iz `bHH7,3*Qh*MECrimOOU[{R aݚLK1[e)Yjs[ 2p֪i/Z ƢBƷmX^V/Ԏwzx.U/2@Ɩ@Ӵؓ -GlܖO럿tF'}QY6 h>בG^.1;FiT8n.><>~qſ[-Y]n(R+Ah=q$m$;( wˆ1-޼pb0:HA!U(tvFg4&mo]R]F)ow)2R?IǑˌh5 I&V"r2 IMRg0ޱ=L۴a9W U}8e ?|G Oֆ,hA0:3Dڲ8Pf;Aslh }}u'mk5UMǻj~IizĪ0x N jePG뒥8jLnz}=[eX(o8X߉ ;ߍnƏ`ai(?X QwZQZֵŷʕ3Uj靕+]gCoF[Yd`Ŧ/52p}9 K f9vD[Qׯ?{V+?jD|kwԚ8(A\&"M\6w7?ugҡ2pwmBƤR/g}%5y+"̈}NmVX{)W.VO.g ϱqD)b2Cik;t% IJ7̛2J,|@+i¢eUiqO]ħ8ɯ/֫n:Hr>/[1 55!E|Jg/;V)#'ral顣[9HbtC>SFs`$>l~Ifq_eࢷ?ܞ~_:u?<<[/:_w}Utprv ج ZʚP=]FppÆׁ;'kY5^SC͓$ݽu]ߋt.u|< lme`5@LJՒ5]j^Pcc(s-#.h%yt+Sqh*%qrJ5g(5p —Ŵ{V.MBZɼF#R*]mJ^+֊TomjUt!ZqxYU^ubb\B}b~R2iK2;"YO\aEMh-qc "۝EIR('MtI(̈́_]ߵzia*tw*5!Rҕ}VcDVk`8/ R&g\0i\ʂi?`J2DUZHZgGQe7٨R66^U6yN$ZLË,/p`bLZM29/\ ^OCazFjTt6(8wA`d#!9:# (2jQZLq8[upSVVFnѠdm1cG ZqM#骋uq\gNQs]AڝT'П!2/e@ڨ8(YMˎMAo3ĶLKGlQ.V7|IzKV%:+ ~}5w2=jЧsmvw ̹lg=OZ:g;+n^Wr Yc;__z=(d*)sL@mVբcۧvXM˿nd(Gc`ӜGQ?h"{o -ɛ~^Ϗa4*iez^*cD*DZ5Ku+Y+T`fí \IAP ڽeԙb1g`qfl8a'_ښ VhA1aΊbFY=e_-?ka]Р1!KpVm3L+h+ր̩))M:5O['VLZ< c|{RD=uUY"PsRКxHK8Vct! ;jw4>ZO0Fܷh jsiׇA%nȭQi;ޝ _OUf =)WI8a($ _DdvJb0wND $e0ك`QH:&3JHCBiw`a>GV 0V Y ĞOPU%\5ad-C֗*Lh GC3$XhZFDxɝeTW: K'?k5xAݳ !"TBD>=9hI)z%׉0dQ4OsZณI/4YqϞHTJLXI2`*=c;)i^rGK"-1IK*7D lzD`9{sq0+E43UX"С#Uj=G7I¡!1-si_I.:`-ZwUq\WG e#9|EW֖Gsڎ_Tr@֒ nE]yĹ0V hY| uDi5vjNP:]RZiiU!yNs =]@&wQq|VfgkP0;+8=ZDMוp6vBVK n\.~T(4ITH'{@TxHޠ]t5"Zʕ ";D1k;+ķ;[P%W[nRU= q]emvW!WW[t WpS٫{ MtMu~݌86T537x=r/iJ6|&:Ȧ8xĻ BG6T537x=nzH(w݆Wz1,7% 8UڄtXezuRPݣndS7 +THAƎALMOPkf9N=EF45aP.V\mr!Qs.dO+UtgmJ_yU\!LXfԧOPVJ_J+5A`c}$&3"LV2N$dd + vj% ,nb2rÉ'NsμKE9u:ɠ)u|j*٠HrNY[Ij NpKҫS3BQF?jmn)4F+cv&%foU>b+iZ['Z@js+AxMOFE{_iTZ7aP.VLe[uVٕV>T:+FG+}V+TOVF 'A6rωlDiFKsƤ\JKg !:W:QP</iWwMq Kݐ~//of7MWMLW/Ks⓳+w\Ob$lǡsIWѴWێ-B;y6{׼O_V!.gup?s\V2rK]eoPmMvD4b\iLoClU_-rc&򳋲\^Ӻ[/=\Z˓oyo3hmuõy wԨ-it,/VsnDI 9`˭ju;oPk;;W&0֮el.ع~zzcӥAg)i;XJ٪ Nw[9~G `p"A嬋 yk@EoLcK ۘ `Aj1RXPD-S(ebOG6{8-݀IRNRHWdÆL9H\iz銇'_Q|ۛ2|A{[EN:v_iOՉ٫^U֎%襲bZZ%MM;+E}(VgA=CUmkE*XRV"[FǺo "F9 Z:OiW`Ru}Sf'>y_[_mFWI&䷍zl;&j.RSs7n8iŧ!x U97bvi~Zvu}^3?5=>0+( wuY*wZy?,))}7.w鍪?*Hl׼&eA&̥5qwWWʭ]~I(4J+nܿ9-Z)wUz 429?9N:i*=ʎ3qj5#ڵ69Cl@pniv7\8N}SV8<.[QU)6sZ4JVaNVf*;aJ(# 7=5E,S ktj>^ߧ?9Ys! 
s1Q (w'V,M4ys?`!BgBDT|D8b`98P}`9 Vy`09) rəq\QΥ̓yús*e3o0?O-vt2bukUCkɶoamP-D.ױԯX欒gmѧ \rGCްFR!G#q 1MD)J5'ܠ^@^'Jo% "|콿۹8<l%o>]#ʚ^4_F(,Ow!@(X"}Rd+nχ:ͫ|v=UOSڣNB4/+('Vn붼κ,AFD\EVxqA*Z>5!|>5̪BDgwX{z)LVn-Qibjdȋ jɥRbU66cBl^kްn2x`lB31t^A׹Iе7 h|tǰ_h$6*j#p|޼Q Lr<vt3آĒ/ [ywςkCQh.Fm@نE^wg,a0J [QG97#9/9TWw ؇`,ٗB_i%b(Of8}3RУuwUuWu%֕p6N܁G1 tfGEQ [7(1($ ;Vy8 8a̷0N|T,b9(]ŔӍf +L%e'f(Vet&.Pg\| 0fY]N yZN$Q@ѲKtZChѨ!PG)Z߰6u#r0G|uϿ|M.rQzy_W4*D 啩3IӦӲ6Fq!4}+ &1O'$m (RBQGNNKeӲj GB3 T#y^t4d/ sZST]ڪ.ߓ<+77Xi\2l8+}TEpm۬TfcQXST] 4d[mR쾣ɒl']^tVlSRn-R̎vJJ )fEX̊ݒR(VT0ҁY@b6:SC_L/T@TNA]f{cLeaA D XDJZEl$hdc(TFBбuyш1m+9H9c2ּFP @M7C XcV\L}DJ /)7T}kC=zA&d$-RaeBAP>P`MI7c0 C*{L ܟjN=V["[jReHXwKR W%>J Ml8DDEktj}Y$%xLtF ]$6frsJhA%C]@Uυj;[VؘƇyX>qj9Ӷ;S*e;ZY h.>}un0"C(ca.'(TT4f>N:5ul_"e-6AM)Wy1+g}47K$:RS/ $Ed~uE䒃j2ndC#Aݑ=ՆVOwK#ty 3䗀3dU,Z\6}5IɊFv&rdm9Qײ¶C1P0Ry|l|{\]a= =+w(=}G xZp,NY,F@,ƓkYr+; 2ݘB cĂ4qOW_6cDJ&CA\M4id/%`nl0BD쯿SoʿcKm>$,X:Xp3?-WEO?9B@3it3h >qp=|Sw9J8jsp]>É5owu-xhY 2k-1a!FJ`c_ =FKYلc8p4:iUǏ9w-WnaE C*f d,wcβaanb(/̢O5C/WN:PY0{:~K4hVƏrGuI?wkX֯cpSYz>__ Ult3U%eq +m{6A\^pnc6 H奠OҿT0?UH3:+ޟ}ջK|}W˫p{~&;`waJaB7 :b9oǐ(˩][(9ww3|yn1ߛH)oԛn ܁.5.tf C"N::t4M\ɚ|Ѡrغt~sy{ؤ-Y>M4-8JOZ'S.gR鲝UqZL;ݬϤS! Rr r͏hM8@J2NWs+U c'VD}]ZcWbl6ۂ[ԁ;; 0PM.:O-v2Բ:( Vl6B: #٨=d%IDYƆ:Sr>4W W!91`P:-k6M2+GHRQ)R>:QHp [7P'?4Hs(qoKgBϔ\l*8[wygZR p4uYbjmu_ T|j.@`*sw]B&7`ݴ(j4:Ti5= .eLѥdGaIx) -vs4m~,>Lm=(VJO/=4B5 TNT]yxV*T;b_)N[.du¬T-GJ1E!|]1T'K.S](W:aY$*NTvgK,o$ aK KN#y4)X˱5c,1*Z2)ǦE%kH1;[ψT {?E)1jE8nJ*k;u̧$؍WFx5j  jTHGMGQ.ۆP>i$ЗHȶp TS#u"!ӄ7W>.K2B+~^ 5DDןUv C3!,r$FOLdh#QUh{e՗/cZm;n}rٱy%M| &֖:5eLaNlJBkJ ۘW8AggXUԖo_TvE1-^=OK<1EwV 0=V-4g欤z_Jj,OunJ !I`yzbԥNAlNt|e Q*Ѳ+߈&$ew Ghu5m"LzQ&_2oy x> $A$}[rYAZ Bv[1El87 ﷩oxl߁"RWT$;i$K % BgV9_gm#SIGAoS[;1rErG A;]s1 zOJ+C_ͨOA9wMnE]1 0r>/cpjwAA<&=̈́{8]΋;HJtӣ:T8!J`'l! &g*m_mt:{84o8\bzcWyC}J@eqzucHZ @-> lځt1ϋ<^'}x_FX3#hƼ,kd-@֦gN<טA+`<+ u)_{kxT^BD޽[}|#W71 6wնU~Wmݪm߭/>tQhR0RM roTHoA%gugk7g~rq 7gE`gumγ۟ʗIioX!N^>מf]1MR,ekfY`I#ZJBI4XkYZt - (X$!d#N;5'io3*e6)l+qQD38埯3 Q$U[y=Z BwmpwEa\m__Z:Dg|o3x7-a8/i&=MoR}\;3z/nM&9_M~5&I\%P*0G}dl!]ШfP- #}gɏQ4홈<>j!!;6Τhhr hhpkgҒ:hNxt&Lmv& ;+}G:˹;hW*]Mi= z2PJ&:4p;|`f6 TƓY aƤ܍a{?ֶ;Q_EpD:]JviM6v$+-ŤI~˦}6FaJ膤'NڏXfe<{,xDHh_m|aZgNB7 5]uܗN5SWQv dLsdI+;H#i {b2IߊP :2mhSPESv>-jmtRɣm#: h}֡ӎ{ZENj{tB6Po#[N\#KξlbcDYʥ|`o^?~zlFH:)uB LEZ*_U!jUm*{{h ɘ RIka.JZ锠}cWeS#$pϑ*5* tYwD:3 H(TRgCB"*I-ёY"GPm0jA0*!B ARmpiJ)d,fG1 Q v&Jzo>IIlkgC6XB' SZ&$tJՙao.A첻4_$S)m`Z@{2& =^{b_|뢔4꾋Z5Shz-<اaHAHCDe>kDuX0E% ~A,9x@bB G $(E=+_r[zWBAt 2JaYAʧpPyuW炏2t/ۤWm {X=O*wkvvbe)pR=HM Fў n=ENb'>ӳ}JQmwd-/m)%=J݈ 6QAUBa)YQ)ۂH=r}p".9N"uC6 ؇rtmJ>h\2øRo?1wIe !H뢜f6銉;ͦ( x$;j0 PT6B~{CZ܄Ý-˫zU<_,qXx-:SmÜ}\=>ݯr8?,03KoW<Ū[<bM5 ߄)'>>Dz,nV}`dE?̻Ii^|m\x|:{2@sf~d/Ak>* Kaكa*\%5[ xHl9XCɻ]IȝNPsM`x+ɗ.u1>a'RM`O_1-rb{R|4 ~S4z1"JFt3yz|J[lOI7bBGCTmHN qZi.3f`Ļ{iT.Ɔ $v֮rw=lM,Wϵ ˁa/}YϻJ{-Hs1ꂎt N_Gj\ؙ8yuoJJu@;)0W^dxp7OH5IP&&-,*nJx%M Y,X]FЎQax Bi$6 o>5ʝfah4;`}b~>t7NelS;y=dtBS, _ ƌĢ=]{D3JJfCQ)gϖR> nh'-KA#|!_w% ٍH,4-^ }6v ?e{JK<`uW_G-Wpaos~K_b}=cNsP!TT"qK@W^WSEd]Z&)K!ԉVU5 1¢.#}$BWեoB LqzJr-~\*s7s?S.re6_&Fw;bD1RsE: S=N4ދÌaɖ2#;yqچYꪴф&S[酫b\c%bFxCR8 {)h"H@%36ALֻpÂwÖ2d#v, ,9 Q H#lna }{aDBA7miBagdYhrt*mLϮrDFΙX?xJ Oy8'FaF;16FvhF n$!oe @I祼$< S"Jhgd4O4_Ơ;3'o$LyT~<ΐVl޲ܑ*K4}8y>@zG삒b-֮hN똮9tbWl yb+ds< D4rsXѺVN#1o))j( CԼcDЉwnDDS8+4VS=Q2 #`ӥg%x(`vik&(`$rLPpxfP$6 d 81#de/$ ah^< ](ӏDĒlvs󃿄6!aY{! װŦttQY&B~{G+Z܄pgK^;OnWA2+kyLJO~*Z.2?.Օ^ʻ'Y7a|&{|kۺp_Y72u~wshZz]]lx.^G\s’VRp @V?V؟u@bдǂ7.ND<1]zkYyNTT;#_UTJ3Yf-wX+pR;]2PLU%I;.D@ ߣQ+C%z.-Ɋ bJ-9Zג&8.u1*" L2jc<#ō5Em23IWxF Z dQh/dTamO7aWRְrE~=+mJ63ĞU2=aWQ)^vb,^gcc7b2hؐNemm"z$>JO磌[|,I4+l<,fekPWg/"x? 
ۨӠV2wѨKL$VEEػ AʹFLg(~^tO(*Mj(%?oeF:eGVPƌՙ}4m]8PR4#VsMcW䘍$48TYb "'On"Pi &@Kruהq'Fr^dUFnD@l;XP0;Md$c]$'UP7ԞlY=)BiN1>!>ΑQ!qV݄0'cQpCפP ƥV6RK7Ol oFCω]`Szn{c?/ Xwx?@VYM>?u/C|K_bByu[c%Z#kUrҕ ԵGPϸ@v]ٻqkW|)< 0Eܹb QԌ$8/%;lK6i2x lwveyC9%r~K2֓| NčS[9o7'/_/y-pe)[Tw5c[Yg@1v'8c /XILy!< 2V `Hi\+PH $ {ˉ XҮGC^UVDq _k0r2n.ZOkJ nSNfJ[սO)awGgKi2U\jl,g3J{I%Pu75m/uKH$^f*@~.eZ>ddKRÖC{1x7nLpu~X7#kO^ hpnᖛnHi+Q MX7 ?s ߧ7n­mKqVQDbU4c)Ff]UZ,"s8b&wkLq hoՔڇU_\m @uUFUdч3=*T"|v!7cFTsn[Iݧy jO^<Ň w C@=uawaځQ~|*((f+晖eB"/3$00.859!2GEw?9=, O̭t`g釂;L]^:@2g]l)ZhMf 4W"z\oխ%FlT3x-7s| @9 "8(5J!܊ϏwJ~ CU{@ZvWzkBh<:)N}Σ$+?E\iqi'IQ K<-,IUAQ 7RĜ:jHnx|w I.ĸ3HWE)A=D:Dhȧu(A L6n~nOMC/KX֚q{Y^yf?\_os6.~;Yս{B&8\IGgװ}N~OZ<:Yozt8Z4&t{vb5ww#e> !4;?֌<;iE=?dxFx9ss;惆$1ww]Vk?>>vѩ :Ҡ~eItV38BIwo8vzĞ[F tRq{p']ϏNj!c3Z]I1z̲]ߎϴQc%Ih)h>=1:O'Кxslܾߌ>cfWsvF.O *Pupv}g͍-W}t"7RdĉSGņ&~vzT/zyQ2 f|Gs1сfHWK2:*},|稨~Ie&GPC%QJ!7*FБJ^n48I"*ʥ|/TT?l9o+ ow[([׫qLB FwfO om%'Oaoj%LߖnZTfCI(-Pg%LV$O3`]h?}_'U9'}柯+CU#A?-\nߛr->[ګXC-n}X<-j W>n 6uT*ot&&o.uUvm30I7oDRci,]Ѹ.-9;X2@n  -B 'kDEI1G݁ "|{ 7K&B1LzOhC^RHʄJdGMKu3U@ @ :IqJ“^LBБ &6F떤JU伮]G4:G؋H>jd*&vkF 4( ,:N8UiR.붇f땽ԅE S*MNfS!i&dF,1rzKG8p5I 4[JQylK$$+f:Ϣ5x$MWNjՖ\Wɤdd,䳯P8g%dr Xdaw/jAK9ةQM0PQAe"JT9s1*'Ip8{tsՋR!IXټ{@R9t?<Ϧ)S\5 QaC"y:@hlгa8&`=gldqaRt(3 0甘ʀ[N),3,QF+֝lA4r_aDL)4,AR-,s@Q\ pR2Q*^ "\Kl"A]h*.M Sm5 = ]IMmQfhHp[OsMJpMPK]pk0O %_ejx[_Z+A7oPbws3BjHnV}rCrjF_/qY%;|ױb"_ɟ}&.X͌}Syn= n?]wȤN{[f/oo/~o.\^yC?zJ/6Z~夘gWl>QK]@Vɲw6]ݦ74)&ȏl|WëT bo/h}fDi-cLi&PM>DӼBdo^?1fb j6`FPS HzMHz0/g߼8lD= i)T}|]^Ueur=_~wSm33V2ݸ,= '=ڠKS8p13 Bٜrwteh4%׮դ=ֱYoY)-*v Ԓi(NLp!a73>_-b LN/!OQ^_Ex tI,?;߮m&o쩌'?J\0 ѹx)r KFzԀ$qU$'K"0 [S5z^#xr^rC*>y>')_: yPƫp|]MaZ&m-o5TSe)W|Nr}~;:>FWcݍDN*={a0P"IW?]A8zlIo}qqǸk$%o8>xLY%'tQ{wX;732Xq] sу}"?;/D)WTͨQQ9TyFCC %4[BIҳ rF)Stao˛sz2;[kg*nq,Y 9DkIQll xcLiֵx- ,Z9DVHls4@$HasMHq1*A$WKUɧj:ԜNo0u asӒd G'`4(cܽ5UA VfJLYLVcM%r~b#eqC.nwy1܄em|d^oUtb+᾽9\D>E_@w Q7Vd uV@a42"%!X0nsK-a@LAp/af7Ҽy1=C\gJ c@jiTZFP`F!!f#@n/#H-9>!'*,ĖS5ejg^oZbJM4a! TG^`]T#AZKA1vY]Yo$Gr+ >y<"/4`hc^k YzŠz8$)l;IVEfV f+82㉩}dU0{dU!a2:YQnuĂ}.Çwm0877ʁc~g0 'zעk`f@c0 :;ǐBmuJj%b RO=V߀ w]!#v.hO5W)=n+釫~ k}¤"ҵ=hڥxq#=65eMeK7+3WcxyϪbC素w9@rM^ qϮ͇h >]Hʴw<6p_UDt,lƱ-J<oٮ^=Z2-Hi}Jz2Ov]ceZܑq)YK՟n-GnN;BۈT`-Su!!o\Ddʘ{Mf"1wi!p[|c _ցqm-Sl:/ыʸvc%20?p݂?.Xˮw ++72)D`rHS!tT_ I (3w }QTؗץ]z{܂S4{,',&:OI,C *XPJaE$gy(+YI(6ڸۣ 3׎=CEQb"r/F M؞(~WΖw76q~v\<8si/ҵ9߼)pͻv<1X!{SܪHlF3e9Os.D^߼!zYWrlִ Mk:+`e3fU DJpcXJrc ۝/ FR:O5yJ+06?[DN7Yk@U|NfK25*FLbf+ =a¯UP]7 7%zn :ՓRRe)eFH:H*T@51d׀cSCV)Wjz kb`)k1|-!TLL B Fpm7,զ:nr c61\d{m_)Y>=8I$r2 v8J!FHaY(8xѵ!\iZy/Aī{,m~&:*lqAw6]ЅA |t]bt딇@ erh!nO̴,oOtؑV՚Se JTmiTD'oal;jj%$J-c1Ʌ}GHxZT} t.$䍋hLI'o&dN;BxޗibS0=[E4JǹxΏb1wh1 (o_ݺ7.;22 T0~G/|CC9 Ge=SCxCSCou h(J7|$->Z&Z+ہgxWTW~\϶ݧFu"#M<@ka *|L0sŠz4]_>jjcoПЊܸ! ();w"Z`opleJByq *0ٮn.+! 
*lv}8++[Y>Zr2Y}vߒEnSO?L|%Gsl^aAv?*,#p6{ 9JdC6l}4v/{d=C]%O8܍cm1pl)Ȥ}W^ƠlgKNٯPw=dt#ml0"2wJ, O:e}ŒZwI%`I',ƞz:LƞO6]IԘF%($n[+\<3\ |}djƶԼG4z _qJ:5v?c^ ʿ :G+:K8&f憥l.IBtb&kx>yud^]5Zxd4A-JXsxH6Ѐ> ljߪBn0 66/ ʿcA#Ƕ th0P1BׅA$E5a(FA tI0n@ `xð> tCW(x  nAh $2}!{30Ywg6 ԽlEc1a҃@0f~3?dt4#@R21%9a8.4!yƛME9@:ӣ:,9~NƺD;.!/Ѻ yv=0r>@ZvTG^GPrF> \?.z װd jOkM&vv&1"1t1NEֻT>"tсq)P{ #jX |D'!m< 7'>ESu!!o\Dcdǹꥋ#jX |D'!m(iOօqݑ))Szh;twҤ oht!<7 p]K$j/TS'= Es p)ev2'5`T$q?)-ԯ&=Ti5 )}]?n)E%TR'!/Q}K4'.M\)HT_R-emo*?)v4ӐRV~ ?;n)eOJY-.&!LIiIc{Z|pHS׺f;MeJv)u]7yh4ĐY0`QX~ ,N%f2E23&SY6f[uIU;X]cz*EME# .ԓ#Yٜ102OQKA]ռx9Vbp/mޝh&ͿoAmSR5fEnYf;e8"Lڨ]xUMAqj sP\g&"%|E+n|rk [jgέJeɹa?|6xxtً 9 ƻMS:x@d!ۀr1O.Hgl26{bP~9nxJsV u.AOpz..զ{C E0!(0G/b7>2sìxJ:Q j׮"rbY@ܦ$JpE)`l-dg:Eb$"K2m .YR+YC 5G%ϢU>Cwp~_ǟyw|.7lHlߣ ("p9 _j\j|W3Ob N%n+zm{h}Ր TߐN$+&?6zlDK}; {!R^>(b5 c31yE5DHfd:4IZӍ*RmH ;O,e$L-\ξ8?6 \pR- Lܔ<LsIJ"a& hu|_.򒵆\I´dhwѕ4JrJ9x5|2'~Q{x?>"G g~ [/&~=|>ء>@4XemV1j[#m *|b~1{Y4?󟓛|2s^˹.ĿxOtQiHmm8ٸ~Klg躚&as6 Kv=cmf.Xo0p7S^Yݷȴ wLr_Kx2sln#˻٭g]fשׂy?Gbֺ-ŀ"h4Fk}#uV[=?q 7M_rVܩwg0ƪՏc71V\j 1Ev 9\{BKʼnd uWiT50V"?_{:n,V4KVfIIJcEh&$ ,$0xU, 'I SqlOeVR8M0UbVb D11H$gqT# ^ Tb9b ^*5M^6{| +ymfi,@5%]/KT/{򴚦`Xjex:M5??6 ?>cw7O"Y.yYLKQۃ|qSnwgs\m}Z;^ yjY{h,AY7d!D}l }}Gnлt|Qǻu2 LZnunMX;7Q/Wލt|ub:Ϩ:Loͻyջ5a!DϷ) x/̚ӇVzV|5نlI`jHRdd4"(DHasZ~~V$^?̯]p~9Ĩy :Z DaԒEÆJxFVzL껪TT,^JG+c8GL+MA&JQD0 ?0M!Q*J!1 0h%i+R#GVJ!:&]UjFE-@ +=&]Uj.l+iK/\j􂭔@'_jˉay|ǥJʉ,^JG+c(SXH" RLHj*" 'ښ)I`D Fq J24MIE$!X( Pimf89Dmy2_F:=DM='zxWZICt4DoO3QT٣4~YlLDEԵjcOy)vQJyw]c"[ϐu^B bh[]i9b5EY|p{W9t{_Ju"w]tkz!F#5%ɖybbz+{Dax}t'\ ZH+6+mh[IJe=po#ic´. 1Owyh{XCm}PИ> k1Y‡j@4ZZcuEo,j:NbEx"%C4 q$>* >TvʲHMv.ӳrZ_ڭ>.+e'׭ rH ؜^ҸLÜ1(P@e+-~(<$}gmlg|y|ۧu4WPD< .Scx%fgգ὆t5Ye`. ^BJ% RQw*'t*m5(̕V>RN3!pov[eT0%{PقBCԦ{IIszMgv"9+n˭3yl>ˆ7[Y "g8%fESlCGvWW _g1z !"[*3%njgy?J}|) ^B,>rq3ɲw'ٗ*OlIBMգnڵCFShcֳR70 c%Gq1Ep6<1y#i8p *x"K\$f#ps,ury0*"jVQm (i$O<%Snܓs:{0O4A'AW<>}E˨zt3 ó{r6bt>A 3m3)0\jjַ2Ee"Nm|"bcfcN $u3w}ZFRhKcRU6jJ.aG+cD!z#cbf&\"I-4H(L2M0טbm@ d٫YO Pvʹ۽-(DQ*-6_?A l}| ف`)~L{kbQ'`fOO}1oL%GBDб:j4Uk3P@ !}u?6J.%cWZ=>{ف"\Wwc0>x-LzˍrBDU&5nFpsL껪 0z.C|tiDYi!d9e[)ud9i+R*ݬ4S`gܘ묕rRUfԘ"fD#ҥ"5D5;X%Z)qz%"I? G"i3 qB!ED`H1X)#HrJS&%jB IEn4GP'S׍ Rԧ؇)U1&0fԚ*PK 6Xo6:hظZAqUkHF5!i"{pv~ZG3H6K3L~RUw}9,;ϗ։h[-'RR/HBD^ސ;?b&KIt8S_W=ݢk~O] 1(㖨[}7N>ԭA&'tO{;{Lѵ;g;B5[}=D͟eMqH}LI:X8qH+vfEc@n0 |ԝ7?A%/ua* J}Z%.}Bw?֑>esw_W?Թi-RT?6cAH@-}%oKA5@ʡ[9D qijYvG0H ySviLw79U(v#AvY$d&yE%{t>H\@)qii^OVY>hK"y6X9.+хؾ٪̍N Í~%GO%z9A+R^R>guE[Dтf ç?kcVˣF@)q›㾿,w Ӓ1" uM.Sw$5 Ν/3P3y1LH?>{ɋPDOOOH,R"{m\d_"`>%;TvN/Wp^v¹-K4G1S8±2'XHtY/wg輔"ަ4  ZD;mF4|HJ}nE9|9 aZ%Z=w+E茝u}'2Ԣc{"XPTf;X @XNHXL@;PNxtg0a޲` a7$P)uMgp7g qX#bZH@88)@  '\tLԎiN5b3'Ō[3f|_6)Xɡ2 $ID,q&I 8Cbgt7;9$7QABMӝnF6}@FůI-y1?,g_u9i$~x>uScQջN0"89F`*)s`'JJ1$! %T)$)*e1yAe|]/UG -*N}*( =KC)f̡0 &Wd]fTCc󲆄`.n'Z-qh+ ܎ l|ЍwSİPp\*2V~0ݛg\]Lf 9:F}jh޻2/ē37?O̴\jLG=fw{_ziV_5D| ̿Fp)j5]~(hFW .fYX-,K>͢,oJWnQoQ)^tU|jՋ: SVZ؟b5]W6;bs3 y9h8|\;@_*Y×w,K>XjAcmWbcH!WKh,WC0S6xʊIida귗 U2DOf1ߖ:wwSkѿ&ө ⭙5PpExIMSy̤)^bIn!xG1Oogc6BXSk+zKX[RrBw; 5Z[JC&cbF[ fa+% L)H3`ywFR&4Ӷp0yJnJ'A1yGZ*8i4n4EF cx;ͼ<!덋0* չ ng=Nk?sz©p(tjGP@>R,iR}qW^áЃ#y{L+љzMx1\8]Yg]DmxzK{To=skTl<|}Rf>}T{5:U' #&%M0MbFI+Xm!/g+ʅh4MLJ4s*IɥchJ$2it\iPd%nM~ww?%Rca@e "W4lu"?g_!Gl? 
var/home/core/zuul-output/logs/kubelet.log
Jan 22 16:04:55 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 22 16:04:55 crc restorecon[4705]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 
16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc 
restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 
crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 
crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 
16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 16:04:55 crc 
restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 
16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:55 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 
16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc 
restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 16:04:56 crc restorecon[4705]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Jan 22 16:04:56 crc kubenswrapper[5032]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.308004 5032 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311731 5032 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311752 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311757 5032 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311761 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311765 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311770 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311774 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311779 5032 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311783 5032 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311786 5032 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311790 5032 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311794 5032 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311797 5032 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311801 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311804 5032 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311808 5032 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311812 5032 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311816 5032 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311820 5032 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311824 5032 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311827 5032 feature_gate.go:330] unrecognized feature gate: 
InsightsConfigAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311833 5032 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311838 5032 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311843 5032 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311848 5032 feature_gate.go:330] unrecognized feature gate: Example Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311852 5032 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311859 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311863 5032 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311867 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311871 5032 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311874 5032 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311889 5032 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311894 5032 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311899 5032 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311903 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311913 5032 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311917 5032 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311922 5032 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311926 5032 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311931 5032 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311935 5032 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311938 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311942 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311946 5032 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311949 5032 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311953 5032 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311957 5032 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311962 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311966 5032 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311970 5032 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311974 5032 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311978 5032 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311983 5032 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311986 5032 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311990 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311994 5032 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.311998 5032 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312001 5032 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312005 5032 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312009 5032 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312013 5032 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312017 5032 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312020 5032 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312024 5032 
feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312029 5032 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312033 5032 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312036 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312039 5032 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312043 5032 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312049 5032 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.312053 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312272 5032 flags.go:64] FLAG: --address="0.0.0.0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312285 5032 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312293 5032 flags.go:64] FLAG: --anonymous-auth="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312300 5032 flags.go:64] FLAG: --application-metrics-count-limit="100" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312306 5032 flags.go:64] FLAG: --authentication-token-webhook="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312311 5032 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312317 5032 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312324 5032 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312328 5032 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312333 5032 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312338 5032 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312343 5032 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312348 5032 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312353 5032 flags.go:64] FLAG: --cgroup-root="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312358 5032 flags.go:64] FLAG: --cgroups-per-qos="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312362 5032 flags.go:64] FLAG: --client-ca-file="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312367 5032 flags.go:64] FLAG: --cloud-config="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312371 5032 flags.go:64] FLAG: --cloud-provider="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312375 5032 flags.go:64] FLAG: --cluster-dns="[]" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312382 5032 flags.go:64] FLAG: --cluster-domain="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312387 5032 flags.go:64] FLAG: 
--config="/etc/kubernetes/kubelet.conf" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312391 5032 flags.go:64] FLAG: --config-dir="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312396 5032 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312401 5032 flags.go:64] FLAG: --container-log-max-files="5" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312407 5032 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312412 5032 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312416 5032 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312421 5032 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312425 5032 flags.go:64] FLAG: --contention-profiling="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312431 5032 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312438 5032 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312442 5032 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312446 5032 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312452 5032 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312458 5032 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312462 5032 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312467 5032 flags.go:64] FLAG: --enable-load-reader="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312471 5032 flags.go:64] FLAG: --enable-server="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312476 5032 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312482 5032 flags.go:64] FLAG: --event-burst="100" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312487 5032 flags.go:64] FLAG: --event-qps="50" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312491 5032 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312495 5032 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312499 5032 flags.go:64] FLAG: --eviction-hard="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312505 5032 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312509 5032 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312513 5032 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312518 5032 flags.go:64] FLAG: --eviction-soft="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312523 5032 flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312527 5032 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 
16:04:56.312532 5032 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312536 5032 flags.go:64] FLAG: --experimental-mounter-path="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312540 5032 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312544 5032 flags.go:64] FLAG: --fail-swap-on="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312548 5032 flags.go:64] FLAG: --feature-gates="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312554 5032 flags.go:64] FLAG: --file-check-frequency="20s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312558 5032 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312562 5032 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312567 5032 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312571 5032 flags.go:64] FLAG: --healthz-port="10248" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312576 5032 flags.go:64] FLAG: --help="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312580 5032 flags.go:64] FLAG: --hostname-override="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312585 5032 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312589 5032 flags.go:64] FLAG: --http-check-frequency="20s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312594 5032 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312598 5032 flags.go:64] FLAG: --image-credential-provider-config="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312603 5032 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312608 5032 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312612 5032 flags.go:64] FLAG: --image-service-endpoint="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312616 5032 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312621 5032 flags.go:64] FLAG: --kube-api-burst="100" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312625 5032 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312630 5032 flags.go:64] FLAG: --kube-api-qps="50" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312634 5032 flags.go:64] FLAG: --kube-reserved="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312638 5032 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312642 5032 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312647 5032 flags.go:64] FLAG: --kubelet-cgroups="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312652 5032 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312656 5032 flags.go:64] FLAG: --lock-file="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312661 5032 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312666 5032 flags.go:64] 
FLAG: --log-flush-frequency="5s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312670 5032 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312677 5032 flags.go:64] FLAG: --log-json-split-stream="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312681 5032 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312685 5032 flags.go:64] FLAG: --log-text-split-stream="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312689 5032 flags.go:64] FLAG: --logging-format="text" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312693 5032 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312698 5032 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312702 5032 flags.go:64] FLAG: --manifest-url="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312706 5032 flags.go:64] FLAG: --manifest-url-header="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312713 5032 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312718 5032 flags.go:64] FLAG: --max-open-files="1000000" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312723 5032 flags.go:64] FLAG: --max-pods="110" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312728 5032 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312732 5032 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312737 5032 flags.go:64] FLAG: --memory-manager-policy="None" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312741 5032 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312745 5032 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312750 5032 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312755 5032 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312766 5032 flags.go:64] FLAG: --node-status-max-images="50" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312772 5032 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312776 5032 flags.go:64] FLAG: --oom-score-adj="-999" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312781 5032 flags.go:64] FLAG: --pod-cidr="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312786 5032 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312793 5032 flags.go:64] FLAG: --pod-manifest-path="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312797 5032 flags.go:64] FLAG: --pod-max-pids="-1" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312802 5032 flags.go:64] FLAG: --pods-per-core="0" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312807 5032 flags.go:64] FLAG: --port="10250" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 
16:04:56.312812 5032 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312817 5032 flags.go:64] FLAG: --provider-id="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312822 5032 flags.go:64] FLAG: --qos-reserved="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312827 5032 flags.go:64] FLAG: --read-only-port="10255" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312832 5032 flags.go:64] FLAG: --register-node="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312836 5032 flags.go:64] FLAG: --register-schedulable="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312842 5032 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312850 5032 flags.go:64] FLAG: --registry-burst="10" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312860 5032 flags.go:64] FLAG: --registry-qps="5" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312864 5032 flags.go:64] FLAG: --reserved-cpus="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312869 5032 flags.go:64] FLAG: --reserved-memory="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312874 5032 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312898 5032 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312903 5032 flags.go:64] FLAG: --rotate-certificates="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312908 5032 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312912 5032 flags.go:64] FLAG: --runonce="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312916 5032 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312921 5032 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312925 5032 flags.go:64] FLAG: --seccomp-default="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312930 5032 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312934 5032 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312939 5032 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312944 5032 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312948 5032 flags.go:64] FLAG: --storage-driver-password="root" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312953 5032 flags.go:64] FLAG: --storage-driver-secure="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312958 5032 flags.go:64] FLAG: --storage-driver-table="stats" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312962 5032 flags.go:64] FLAG: --storage-driver-user="root" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312966 5032 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312970 5032 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312974 5032 flags.go:64] FLAG: --system-cgroups="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312978 5032 flags.go:64] FLAG: 
--system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312987 5032 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312991 5032 flags.go:64] FLAG: --tls-cert-file="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.312995 5032 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313001 5032 flags.go:64] FLAG: --tls-min-version="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313006 5032 flags.go:64] FLAG: --tls-private-key-file="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313010 5032 flags.go:64] FLAG: --topology-manager-policy="none" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313015 5032 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313020 5032 flags.go:64] FLAG: --topology-manager-scope="container" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313024 5032 flags.go:64] FLAG: --v="2" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313030 5032 flags.go:64] FLAG: --version="false" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313036 5032 flags.go:64] FLAG: --vmodule="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313042 5032 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313047 5032 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313160 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313166 5032 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313171 5032 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313175 5032 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313187 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313191 5032 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313195 5032 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313201 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313205 5032 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313209 5032 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313213 5032 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313217 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313222 5032 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313225 5032 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 16:04:56 crc 
kubenswrapper[5032]: W0122 16:04:56.313229 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313233 5032 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313238 5032 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313243 5032 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313249 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313253 5032 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313257 5032 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313262 5032 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313266 5032 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313269 5032 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313273 5032 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313277 5032 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313281 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313284 5032 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313288 5032 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313292 5032 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313296 5032 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313300 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313303 5032 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313307 5032 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313312 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313316 5032 feature_gate.go:330] unrecognized feature gate: Example Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313325 5032 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313330 5032 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313335 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313340 5032 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313344 5032 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313349 5032 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313353 5032 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313357 5032 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313362 5032 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313366 5032 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313370 5032 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313374 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313378 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313382 5032 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313386 5032 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313390 5032 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313394 5032 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313399 5032 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313404 5032 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313408 5032 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313412 5032 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313415 5032 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313420 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313425 5032 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313429 5032 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313433 5032 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313437 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313441 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313445 5032 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313449 5032 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313454 5032 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313458 5032 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313464 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313470 5032 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.313476 5032 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.313490 5032 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.324977 5032 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.325005 5032 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325074 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325080 5032 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325085 5032 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325090 5032 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325096 5032 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325100 5032 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325104 5032 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325108 5032 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325112 5032 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325115 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325119 5032 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325122 5032 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325126 5032 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325130 5032 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325133 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325137 5032 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325141 5032 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325144 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325148 5032 feature_gate.go:330] unrecognized feature gate: 
SetEIPForNLBIngressController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325152 5032 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325155 5032 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325160 5032 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325167 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325172 5032 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325178 5032 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325183 5032 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325188 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325193 5032 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325198 5032 feature_gate.go:330] unrecognized feature gate: Example Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325202 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325206 5032 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325211 5032 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325215 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325219 5032 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325224 5032 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325228 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325232 5032 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325236 5032 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325239 5032 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325243 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325247 5032 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325250 5032 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325254 5032 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325258 5032 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325261 5032 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325265 5032 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325268 5032 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325272 5032 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325276 5032 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325280 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325284 5032 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325289 5032 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325293 5032 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325296 5032 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325300 5032 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325305 5032 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325308 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325312 5032 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325315 5032 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325319 5032 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325322 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325326 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325329 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325333 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325336 5032 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325340 5032 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325343 5032 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325347 5032 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325350 5032 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325354 5032 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325363 5032 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.325369 5032 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false 
ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325487 5032 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325492 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325496 5032 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325500 5032 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325503 5032 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325507 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325511 5032 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325514 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325518 5032 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325524 5032 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325528 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325533 5032 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325537 5032 feature_gate.go:330] unrecognized feature gate: Example Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325541 5032 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325545 5032 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325548 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325553 5032 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325556 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325560 5032 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325564 5032 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325567 5032 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325570 5032 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325574 5032 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325577 5032 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325581 5032 feature_gate.go:330] unrecognized feature gate: 
VSphereMultiNetworks Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325585 5032 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325589 5032 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325593 5032 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325597 5032 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325601 5032 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325605 5032 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325608 5032 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325613 5032 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325617 5032 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325621 5032 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325625 5032 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325629 5032 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325633 5032 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325637 5032 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325641 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325645 5032 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325649 5032 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325653 5032 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325657 5032 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325661 5032 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325664 5032 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325670 5032 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325673 5032 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325677 5032 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325681 5032 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 
16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325684 5032 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325688 5032 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325692 5032 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325695 5032 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325699 5032 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325702 5032 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325706 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325709 5032 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325713 5032 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325716 5032 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325720 5032 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325724 5032 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325727 5032 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325731 5032 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325734 5032 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325738 5032 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325742 5032 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325746 5032 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325750 5032 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325753 5032 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.325757 5032 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.325763 5032 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.326121 5032 server.go:940] "Client rotation is on, will bootstrap in background" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.329017 5032 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.329096 5032 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.329549 5032 server.go:997] "Starting client certificate rotation" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.329575 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.330107 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-15 17:09:32.367089601 +0000 UTC Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.330249 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.335504 5032 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.337283 5032 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.337944 5032 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.349607 5032 log.go:25] "Validated CRI v1 runtime API" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.366822 5032 log.go:25] "Validated CRI v1 image API" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.369330 5032 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 
16:04:56.373122 5032 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-22-16-00-30-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.373163 5032 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.387344 5032 manager.go:217] Machine: {Timestamp:2026-01-22 16:04:56.385975684 +0000 UTC m=+0.198144735 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:2f63932e-1307-43a5-8503-03171219a9a3 BootID:a0e083b8-b5a8-42a6-8604-3847e4e28cf7 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:0b:e4:ba Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:0b:e4:ba Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:6f:86:5a Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:3d:7e:b7 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:33:01:e1 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f7:29:ea Speed:-1 Mtu:1496} {Name:eth10 MacAddress:d6:58:6e:f9:cb:df Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:72:fe:fb:ad:cd:05 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction 
Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.387576 5032 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.387808 5032 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.388632 5032 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.388793 5032 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.388826 5032 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.389058 5032 topology_manager.go:138] "Creating topology manager with none policy" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.389068 5032 container_manager_linux.go:303] "Creating device plugin manager" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.389259 5032 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.389284 5032 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.389501 5032 state_mem.go:36] "Initialized new in-memory state store" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390070 5032 server.go:1245] "Using root directory" path="/var/lib/kubelet" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390696 5032 kubelet.go:418] "Attempting to sync node with API server" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390712 5032 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390736 5032 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390754 5032 kubelet.go:324] "Adding apiserver pod source" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.390768 5032 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.393150 5032 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.393234 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.393311 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.393586 5032 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.394188 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.394250 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.394456 5032 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395070 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395100 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395111 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395121 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395139 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395150 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395161 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395177 5032 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/downward-api" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395191 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395202 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395217 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395227 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.395431 5032 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.396042 5032 server.go:1280] "Started kubelet" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.397504 5032 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.397466 5032 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.398510 5032 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.399222 5032 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Jan 22 16:04:56 crc systemd[1]: Started Kubernetes Kubelet. Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402102 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402139 5032 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402364 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 22:54:07.420039772 +0000 UTC Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402676 5032 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402705 5032 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.402778 5032 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.403421 5032 server.go:460] "Adding debug handlers to kubelet server" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.403756 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="200ms" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.403776 5032 factory.go:55] Registering systemd factory Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.403817 5032 factory.go:221] Registration of the systemd container factory successfully Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.404637 5032 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 22 16:04:56 crc 
kubenswrapper[5032]: I0122 16:04:56.407099 5032 factory.go:153] Registering CRI-O factory Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.407118 5032 factory.go:221] Registration of the crio container factory successfully Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.407178 5032 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.407238 5032 factory.go:103] Registering Raw factory Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.407254 5032 manager.go:1196] Started watching for new ooms in manager Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.411332 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.411452 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.412013 5032 manager.go:319] Starting recovery of all containers Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.412076 5032 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188d1929e7932c62 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 16:04:56.396008546 +0000 UTC m=+0.208177587,LastTimestamp:2026-01-22 16:04:56.396008546 +0000 UTC m=+0.208177587,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418107 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418175 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418198 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418217 5032 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418236 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418256 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418276 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418295 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418317 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418339 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418361 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418380 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418400 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418422 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.418443 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.421295 5032 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422210 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422296 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422347 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422378 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422406 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422449 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422475 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422506 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422529 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 
16:04:56.422554 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422861 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422952 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422970 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.422991 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423005 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423029 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423042 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423055 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423074 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423088 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 
16:04:56.423100 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423117 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423132 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423161 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423182 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423199 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423220 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423232 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423253 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423271 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423286 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423308 5032 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423322 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423339 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423358 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423376 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423401 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423431 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423451 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423477 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423500 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423518 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423542 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423559 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423576 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423595 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423611 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423635 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423651 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423667 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423690 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423709 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423732 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423753 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423772 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423804 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423829 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423866 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423911 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423933 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423960 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.423986 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424009 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424030 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424047 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424071 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424090 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424111 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424127 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424143 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424196 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424212 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424234 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424250 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424267 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424287 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424303 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424322 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424342 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424356 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424377 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424393 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424409 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424434 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424455 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424478 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424495 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424511 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424530 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424558 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424577 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424600 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424620 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424636 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424657 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424677 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424694 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424716 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424734 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424753 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424766 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424794 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424808 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424829 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424842 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424857 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424899 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424914 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424933 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424946 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424960 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424977 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.424990 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425005 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425024 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425040 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425060 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425086 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425105 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425128 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425148 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425173 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425192 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425208 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425239 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425254 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425275 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425293 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425308 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425326 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425344 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425365 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425382 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425396 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425415 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425775 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425804 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425828 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425844 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425869 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425898 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425917 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425931 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.425946 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426377 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426394 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426415 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426429 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426441 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426464 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426511 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426773 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426907 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426938 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.426981 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427030 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427063 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427085 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427109 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427141 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427165 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427221 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427244 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427269 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427299 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427320 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427350 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427373 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427395 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427497 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427519 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427547 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427570 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427592 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427622 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427646 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427711 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427733 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427753 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427776 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427791 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427807 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427820 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427834 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427860 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427875 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.427986 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428003 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428018 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428034 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428047 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428066 5032 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428080 5032 reconstruct.go:97] "Volume reconstruction finished" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.428092 5032 reconciler.go:26] "Reconciler: start to sync state" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.436651 5032 manager.go:324] Recovery completed Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.446113 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.449742 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.449786 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.449801 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.450784 5032 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.450801 5032 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.450829 5032 state_mem.go:36] "Initialized new in-memory state store" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.451280 5032 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv4" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.453279 5032 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.453338 5032 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.453377 5032 kubelet.go:2335] "Starting kubelet main sync loop" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.453440 5032 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 22 16:04:56 crc kubenswrapper[5032]: W0122 16:04:56.454405 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.454486 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.463832 5032 policy_none.go:49] "None policy: Start" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.464773 5032 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.464800 5032 state_mem.go:35] "Initializing new in-memory state store" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.505489 5032 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.567952 5032 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.569003 5032 manager.go:334] "Starting Device Plugin manager" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.569058 5032 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.569072 5032 server.go:79] "Starting device plugin registration server" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.569549 5032 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.569572 5032 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.571918 5032 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.572087 5032 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.572103 5032 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.579324 5032 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.604964 5032 controller.go:145] 
"Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="400ms" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.670042 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.671558 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.671604 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.671616 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.671650 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.672183 5032 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.769203 5032 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.769352 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.770647 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.770683 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.770698 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.770816 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.771122 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.771183 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772129 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772152 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772162 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772193 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772185 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772271 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772451 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772482 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.772512 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773445 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773500 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773658 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.773969 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.774172 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.774237 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775088 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775147 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775161 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775190 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775169 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775206 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775349 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775437 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.775475 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776379 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776400 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776411 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776381 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776447 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776779 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.776811 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.777454 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.777474 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.777482 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833530 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833576 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833600 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833626 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833647 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833667 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833686 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833847 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833962 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.833994 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.834084 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.834753 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.834834 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.834879 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.834905 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.872509 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.873664 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.873702 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.873714 5032 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.873747 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:04:56 crc kubenswrapper[5032]: E0122 16:04:56.874322 5032 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936029 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936225 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936254 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936350 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936389 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936416 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936411 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936488 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936511 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936539 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936615 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936631 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936658 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936677 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936714 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936738 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936745 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936756 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936765 5032 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936802 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936811 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936800 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936906 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936831 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936935 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936958 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936977 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.936961 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 
16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.937082 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:56 crc kubenswrapper[5032]: I0122 16:04:56.937307 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.006935 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="800ms" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.110151 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.130913 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.134917 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-07bc484312ce43862f2c0cc4f8c62ec03b99bbb27ccf871d538b117b1695174f WatchSource:0}: Error finding container 07bc484312ce43862f2c0cc4f8c62ec03b99bbb27ccf871d538b117b1695174f: Status 404 returned error can't find the container with id 07bc484312ce43862f2c0cc4f8c62ec03b99bbb27ccf871d538b117b1695174f Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.148002 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.155286 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-d81b6f8a793fc0967578d75508631ef88df64461b1e6e6c9eb2771ab48c3812b WatchSource:0}: Error finding container d81b6f8a793fc0967578d75508631ef88df64461b1e6e6c9eb2771ab48c3812b: Status 404 returned error can't find the container with id d81b6f8a793fc0967578d75508631ef88df64461b1e6e6c9eb2771ab48c3812b Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.156500 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.160415 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.171805 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-d92e6b5e256eb1190feb040629366e8c5c8a1aad849cf3848ef419a119b60c83 WatchSource:0}: Error finding container d92e6b5e256eb1190feb040629366e8c5c8a1aad849cf3848ef419a119b60c83: Status 404 returned error can't find the container with id d92e6b5e256eb1190feb040629366e8c5c8a1aad849cf3848ef419a119b60c83 Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.175445 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-e1a8343c6ffa220e5488249452f1bd9b829b812a0536610a66f37ea9e9b8a0a4 WatchSource:0}: Error finding container e1a8343c6ffa220e5488249452f1bd9b829b812a0536610a66f37ea9e9b8a0a4: Status 404 returned error can't find the container with id e1a8343c6ffa220e5488249452f1bd9b829b812a0536610a66f37ea9e9b8a0a4 Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.182129 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-601808be77208bf003ad10dd0c6a8773c3b462e9ee9b060a33b9071d1a160803 WatchSource:0}: Error finding container 601808be77208bf003ad10dd0c6a8773c3b462e9ee9b060a33b9071d1a160803: Status 404 returned error can't find the container with id 601808be77208bf003ad10dd0c6a8773c3b462e9ee9b060a33b9071d1a160803 Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.274949 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.276509 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.276555 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.276567 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.276602 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.277217 5032 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc" Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.400612 5032 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.403617 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 23:33:51.591359151 +0000 UTC Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.458696 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e1a8343c6ffa220e5488249452f1bd9b829b812a0536610a66f37ea9e9b8a0a4"} Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.460298 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d92e6b5e256eb1190feb040629366e8c5c8a1aad849cf3848ef419a119b60c83"} Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.464234 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d81b6f8a793fc0967578d75508631ef88df64461b1e6e6c9eb2771ab48c3812b"} Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.468326 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"07bc484312ce43862f2c0cc4f8c62ec03b99bbb27ccf871d538b117b1695174f"} Jan 22 16:04:57 crc kubenswrapper[5032]: I0122 16:04:57.469889 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"601808be77208bf003ad10dd0c6a8773c3b462e9ee9b060a33b9071d1a160803"} Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.508308 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.508419 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.563451 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.563563 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.722238 5032 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188d1929e7932c62 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 16:04:56.396008546 +0000 UTC 
m=+0.208177587,LastTimestamp:2026-01-22 16:04:56.396008546 +0000 UTC m=+0.208177587,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.808521 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="1.6s" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.887166 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.887311 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:57 crc kubenswrapper[5032]: W0122 16:04:57.937147 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:57 crc kubenswrapper[5032]: E0122 16:04:57.937305 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.077929 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.080451 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.080508 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.080527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.080575 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:04:58 crc kubenswrapper[5032]: E0122 16:04:58.082175 5032 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.400631 5032 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.403791 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 
2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 08:36:07.390989191 +0000 UTC Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.475424 5032 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2eeff3d29c802ebedf0ba92ad6409a76c99aa3a05228b2f8e0640dcdf4324848" exitCode=0 Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.475739 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.476551 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2eeff3d29c802ebedf0ba92ad6409a76c99aa3a05228b2f8e0640dcdf4324848"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.477478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.477519 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.477538 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.480729 5032 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="a65775e7159a056e766fe0a0f5aa63e72611dd5a76245ab2425b8a01851e837a" exitCode=0 Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.480820 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"a65775e7159a056e766fe0a0f5aa63e72611dd5a76245ab2425b8a01851e837a"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.480867 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.482454 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.482501 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.482524 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.483657 5032 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b" exitCode=0 Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.483789 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.483910 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.484285 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.484984 5032 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.485070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.485090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:58 crc kubenswrapper[5032]: E0122 16:04:58.485716 5032 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.489220 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"308beded30b2496fcbd8a497e1a20f6861eea210d7b9823237d21eff0ec3c154"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.489271 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c3f14c37e8b70e2f3c5c3eca6e2c4cd0143243c8ce3ca9fae34bc4ad6d5a0c1c"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.489297 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f2cf10cfa5d3f824d929ecbffda4badc3bd82ca69f42ed83f0081f13c2e05666"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.491308 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07" exitCode=0 Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.491363 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07"} Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.491424 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.492595 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.492639 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.492662 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.494428 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.495698 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.495754 
5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:58 crc kubenswrapper[5032]: I0122 16:04:58.495772 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.404393 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 13:45:40.777302131 +0000 UTC Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.498256 5032 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4b17b19c5f7724988555cb6779374ceafbaa65f0297bc43517cd2ca93a1eb644" exitCode=0 Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.498356 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4b17b19c5f7724988555cb6779374ceafbaa65f0297bc43517cd2ca93a1eb644"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.498806 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.503582 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.503797 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.504061 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.505062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"bcb306a93338b2655ca9f0fb95bcebe9ea1ca81371a1f139a0cb87cd27d119ee"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.505196 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.506839 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.506887 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.506903 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.514211 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.514264 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.514288 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.514559 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.518650 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.518693 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.518711 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.524160 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ae4f1d64c22688d1cea64550c410d6d6f9d7f18bc0b3be542a5c10d675f4cc4e"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.524196 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.525529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.525567 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.525587 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.529487 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.529535 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.529556 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.529576 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b"} Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.682875 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.684105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 
16:04:59.684223 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.684322 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:04:59 crc kubenswrapper[5032]: I0122 16:04:59.684453 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.405052 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 12:06:58.339716766 +0000 UTC Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.537686 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372"} Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.537820 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.539628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.539693 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.539714 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540477 5032 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="900df79023c48f259463703c51849270bb281bb391df031bfd4c74a553b77abf" exitCode=0 Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540576 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540628 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540669 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"900df79023c48f259463703c51849270bb281bb391df031bfd4c74a553b77abf"} Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540723 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.540845 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.541071 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.541621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.541675 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.541704 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542151 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542175 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542351 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542417 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542864 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.542978 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.760635 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:00 crc kubenswrapper[5032]: I0122 16:05:00.769534 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.406252 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 17:46:22.74082385 +0000 UTC Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555272 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fceef9cc22d592a6b1243b0890e489f4a5b0858e801dbe175522b612476b7dc3"} Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555322 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4f2603507e22cd248d83a4069f7b8564920efb79f3d716d8fc81ed84af29c23a"} Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555337 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"911b68f75e336c68eb9dfa5e59d7f7117f82a2cdd5f8ef60b2d56f5a8d7fabbf"} Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555394 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555461 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555507 5032 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.555505 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557301 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557348 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557362 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557311 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557406 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557489 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557675 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:01 crc kubenswrapper[5032]: I0122 16:05:01.557695 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.140127 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.407792 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 04:03:01.146854528 +0000 UTC Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.565515 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ae3bb83201fdd316ea66b74f9bd19045b7ed64e75ab80b767064974bfeae3c62"} Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.566556 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"521ad43a93b0cb8b977d3ea77c69195057fac0e12e5fc3c6c5791ba237e2a507"} Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.565809 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.565718 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.566684 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.565950 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.567993 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568466 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568489 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568508 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568573 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568620 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.568638 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:02 crc kubenswrapper[5032]: I0122 16:05:02.860119 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.409030 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 18:32:03.464400934 +0000 UTC Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.410180 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.569185 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.569186 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.570548 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.570604 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.570620 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.570964 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.571018 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.571039 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:03 crc kubenswrapper[5032]: I0122 16:05:03.626565 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 22 16:05:03 crc 
kubenswrapper[5032]: I0122 16:05:03.738427 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.297011 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.410145 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 22:46:31.105614257 +0000 UTC Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.572615 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.572811 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.573915 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.574011 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.574033 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.574312 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.574339 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.574348 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.989012 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.989194 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.989255 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.990655 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.990696 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:04 crc kubenswrapper[5032]: I0122 16:05:04.990708 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:05 crc kubenswrapper[5032]: I0122 16:05:05.411172 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 05:21:54.451982872 +0000 UTC Jan 22 16:05:05 crc kubenswrapper[5032]: I0122 16:05:05.575572 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:05 crc kubenswrapper[5032]: I0122 16:05:05.576599 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 
22 16:05:05 crc kubenswrapper[5032]: I0122 16:05:05.576656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:05 crc kubenswrapper[5032]: I0122 16:05:05.576674 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:06 crc kubenswrapper[5032]: I0122 16:05:06.411971 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 10:56:23.516446028 +0000 UTC Jan 22 16:05:06 crc kubenswrapper[5032]: E0122 16:05:06.579531 5032 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.130798 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.131117 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.131176 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.133117 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.133208 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.133228 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.135415 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.412947 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 04:01:59.860325318 +0000 UTC Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.580798 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.581988 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.582063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.582088 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.989704 5032 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded" start-of-body= Jan 22 16:05:07 crc kubenswrapper[5032]: I0122 16:05:07.989914 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" 
probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded" Jan 22 16:05:08 crc kubenswrapper[5032]: I0122 16:05:08.413523 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 14:33:59.674392562 +0000 UTC Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.068125 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.069774 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.074989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.075104 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.075136 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.401066 5032 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 22 16:05:09 crc kubenswrapper[5032]: E0122 16:05:09.410617 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.414714 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 07:08:45.658100954 +0000 UTC Jan 22 16:05:09 crc kubenswrapper[5032]: W0122 16:05:09.618070 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 22 16:05:09 crc kubenswrapper[5032]: I0122 16:05:09.618416 5032 trace.go:236] Trace[1448639278]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 16:04:59.616) (total time: 10001ms): Jan 22 16:05:09 crc kubenswrapper[5032]: Trace[1448639278]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:05:09.618) Jan 22 16:05:09 crc kubenswrapper[5032]: Trace[1448639278]: [10.001755997s] [10.001755997s] END Jan 22 16:05:09 crc kubenswrapper[5032]: E0122 16:05:09.618562 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 22 16:05:09 crc kubenswrapper[5032]: E0122 16:05:09.686139 5032 kubelet_node_status.go:99] "Unable to register node with API server" 
err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Jan 22 16:05:10 crc kubenswrapper[5032]: W0122 16:05:10.224703 5032 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.224841 5032 trace.go:236] Trace[1743758794]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 16:05:00.222) (total time: 10002ms): Jan 22 16:05:10 crc kubenswrapper[5032]: Trace[1743758794]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (16:05:10.224) Jan 22 16:05:10 crc kubenswrapper[5032]: Trace[1743758794]: [10.002564889s] [10.002564889s] END Jan 22 16:05:10 crc kubenswrapper[5032]: E0122 16:05:10.224920 5032 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.375998 5032 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.376083 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.396583 5032 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.396691 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 22 16:05:10 crc kubenswrapper[5032]: I0122 16:05:10.415437 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 08:46:18.422234829 +0000 UTC Jan 22 16:05:11 crc kubenswrapper[5032]: I0122 16:05:11.415960 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 01:44:42.746501529 +0000 UTC Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.416366 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 
2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 06:15:17.42357161 +0000 UTC Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.887240 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.888804 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.888849 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.888881 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:12 crc kubenswrapper[5032]: I0122 16:05:12.888932 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:05:12 crc kubenswrapper[5032]: E0122 16:05:12.893267 5032 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.416752 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 06:42:08.803112897 +0000 UTC Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.647851 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.648099 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.649299 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.649426 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.649502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.657972 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.747267 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.747567 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.749141 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.749210 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.749233 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:13 crc kubenswrapper[5032]: I0122 16:05:13.753394 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.417479 5032 
certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 10:54:33.631546176 +0000 UTC Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.601714 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.601772 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.601802 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603286 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603305 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603360 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603418 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:14 crc kubenswrapper[5032]: I0122 16:05:14.603437 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.307293 5032 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.378921 5032 trace.go:236] Trace[1110189883]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 16:05:00.568) (total time: 14809ms): Jan 22 16:05:15 crc kubenswrapper[5032]: Trace[1110189883]: ---"Objects listed" error: 14809ms (16:05:15.378) Jan 22 16:05:15 crc kubenswrapper[5032]: Trace[1110189883]: [14.809806037s] [14.809806037s] END Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.378976 5032 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.379900 5032 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.381909 5032 trace.go:236] Trace[1407985249]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 16:05:00.461) (total time: 14920ms): Jan 22 16:05:15 crc kubenswrapper[5032]: Trace[1407985249]: ---"Objects listed" error: 14920ms (16:05:15.381) Jan 22 16:05:15 crc kubenswrapper[5032]: Trace[1407985249]: [14.920125661s] [14.920125661s] END Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.381935 5032 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.410032 5032 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.417908 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 
UTC, rotation deadline is 2025-12-23 13:02:32.508897226 +0000 UTC Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.455241 5032 csr.go:261] certificate signing request csr-2fswz is approved, waiting to be issued Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.464102 5032 csr.go:257] certificate signing request csr-2fswz is issued Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.527980 5032 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37216->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.528003 5032 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37230->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.528055 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37216->192.168.126.11:17697: read: connection reset by peer" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.528092 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37230->192.168.126.11:17697: read: connection reset by peer" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.528465 5032 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.528494 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.571901 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.572128 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.573448 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.573487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.573501 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.579955 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.605756 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.607610 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372" exitCode=255 Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.607652 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372"} Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.607758 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.607777 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608713 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608747 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608761 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608722 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608933 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.608953 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:15 crc kubenswrapper[5032]: I0122 16:05:15.609788 5032 scope.go:117] "RemoveContainer" containerID="5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.307374 5032 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.330823 5032 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 22 16:05:16 crc kubenswrapper[5032]: W0122 16:05:16.331048 5032 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.RuntimeClass ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 22 16:05:16 crc kubenswrapper[5032]: W0122 16:05:16.331085 5032 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Node ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 22 
16:05:16 crc kubenswrapper[5032]: W0122 16:05:16.331476 5032 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.CSIDriver ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.403252 5032 apiserver.go:52] "Watching apiserver" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.406101 5032 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.406324 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-rbbd6","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.406631 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.406960 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.407020 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.407111 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.407183 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.407271 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.407309 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.407627 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.407751 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.407822 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.410101 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.410260 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.410300 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411317 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411372 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411469 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411529 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411562 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411678 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.411812 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.412252 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.413653 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.418233 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 12:26:25.568347721 +0000 UTC Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.435409 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.452692 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.465090 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-22 16:00:15 +0000 UTC, rotation deadline is 2026-10-29 23:20:44.326207309 +0000 UTC Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.465591 5032 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6727h15m27.86062226s for next certificate rotation Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.480428 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.491247 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.499979 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.504018 5032 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.508746 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.518443 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.529573 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.548848 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.571835 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.585883 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.585929 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.585952 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.585974 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.585993 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586029 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586046 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: 
\"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586066 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586085 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586131 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586149 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586170 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586187 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586203 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586220 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586239 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: 
I0122 16:05:16.586287 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586306 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586329 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586348 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586368 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586388 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586409 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586428 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586449 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586477 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 16:05:16 crc 
kubenswrapper[5032]: I0122 16:05:16.586515 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586534 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586553 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586574 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586600 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586618 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586637 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586656 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586676 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586697 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" 
(UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586716 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586733 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586751 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586769 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586787 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586805 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586822 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586842 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586878 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586902 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: 
\"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586926 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586947 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.586985 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587003 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587020 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587037 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587054 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587071 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587088 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587107 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587129 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587146 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587163 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587184 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587217 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587236 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587256 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587273 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587294 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587313 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") 
pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587303 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587332 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587439 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587479 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587506 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587527 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587548 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587572 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587609 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 
16:05:16.587628 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587650 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587675 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587720 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587739 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587762 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587785 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587808 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587829 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: 
\"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587847 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587891 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590799 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587717 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588158 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588112 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588530 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588692 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588740 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588739 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588817 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588822 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.588918 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589067 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589082 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589383 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589417 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589422 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589660 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589676 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589717 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589702 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589738 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589973 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589974 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.589991 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590048 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590221 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590326 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590392 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590452 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590499 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590657 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.590676 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591058 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591121 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591452 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591537 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591573 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591676 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.592831 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.591996 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.592018 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.592089 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.592601 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.592826 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). 
InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.593895 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.593945 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594092 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594225 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594337 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594436 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.587909 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594786 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594891 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595013 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595100 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595180 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595249 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595324 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595406 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595488 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595564 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595641 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595713 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595791 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595880 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595971 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596041 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596109 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596181 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596252 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: 
\"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596325 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596393 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596458 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596528 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596597 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596672 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596738 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596807 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596898 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596977 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597052 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597123 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597200 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597275 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597347 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597418 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597483 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597557 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597632 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597705 5032 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597777 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597865 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597994 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598075 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598174 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598266 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598346 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598424 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598496 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598570 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598643 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598715 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598785 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598946 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599362 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599445 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599522 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599591 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599663 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc 
kubenswrapper[5032]: I0122 16:05:16.599743 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599824 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600121 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600205 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600279 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600354 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600443 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600521 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600601 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600680 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600762 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600839 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600927 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600999 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601080 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601163 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601242 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601317 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601392 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601469 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601550 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601631 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601712 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601788 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601882 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.601998 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602079 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602176 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602360 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602440 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602520 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602597 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602677 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602758 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602831 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602930 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603409 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603493 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603573 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603658 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603732 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603802 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604084 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604153 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604190 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604227 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604260 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604297 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604384 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604440 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604476 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604513 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604546 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604589 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604629 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604675 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0fa8611e-686b-432f-8871-e6b5645e3108-hosts-file\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604704 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pkf6\" (UniqueName: \"kubernetes.io/projected/0fa8611e-686b-432f-8871-e6b5645e3108-kube-api-access-2pkf6\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 
crc kubenswrapper[5032]: I0122 16:05:16.604745 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604784 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604814 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604849 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604962 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604989 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605013 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605115 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605137 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605155 5032 
reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605187 5032 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605199 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605210 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605222 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605235 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605247 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605257 5032 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605272 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605283 5032 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605295 5032 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605306 5032 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605318 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 
16:05:16.605328 5032 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605843 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605878 5032 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605891 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605905 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605918 5032 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605931 5032 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605943 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605958 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605969 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605980 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605991 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606002 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 
22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606013 5032 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606027 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606040 5032 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606053 5032 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606064 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606075 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606086 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606098 5032 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606111 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606122 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606132 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606143 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606173 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 
16:05:16.606187 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606202 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606214 5032 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606227 5032 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606238 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606249 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606263 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606274 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606284 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613054 5032 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613915 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615582 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594533 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615941 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.594776 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595138 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595189 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595366 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595348 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595534 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595541 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.595685 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596022 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596280 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.596786 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.597997 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.598571 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599014 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599095 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599379 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599827 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599853 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.599903 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600113 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.600721 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602631 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.602917 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603222 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.603441 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604016 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.604072 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605255 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605463 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.605933 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606016 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606027 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606238 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606334 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.606358 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.616642 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:17.116614724 +0000 UTC m=+20.928783795 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.606739 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607053 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607082 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607171 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607586 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607598 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.607716 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.608049 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.608176 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.608341 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.608278 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.609936 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.610279 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.610610 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.610722 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.610804 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611008 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611370 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611440 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.617407 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611609 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611879 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.611916 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.612343 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.612405 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.612428 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.612629 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.612696 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613066 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613096 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613241 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613354 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613374 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613597 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613676 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613818 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613903 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.613917 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.614110 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.614517 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.614772 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.614831 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.614829 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.615005 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618445 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615141 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615540 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615708 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618517 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615754 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615927 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.615960 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.616412 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.616986 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618591 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.617047 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618160 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618437 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.618550 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:17.118517691 +0000 UTC m=+20.930686722 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.618940 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.619425 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.619533 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.619978 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:17.119948596 +0000 UTC m=+20.932117637 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620021 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620339 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620471 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620552 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620742 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620673 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620808 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.620908 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621086 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621111 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621160 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621324 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621365 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621391 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621405 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621532 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621698 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.621757 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.629723 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.629846 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.630335 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.630589 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.632581 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.632633 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.632666 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.632752 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.632906 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.633231 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.633753 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.633791 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.633806 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.634526 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.637351 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:17.135669149 +0000 UTC m=+20.947838180 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.637408 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.637644 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.638953 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:16 crc kubenswrapper[5032]: E0122 16:05:16.639404 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:17.139178035 +0000 UTC m=+20.951347066 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.639564 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.639749 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.641230 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.641889 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.645743 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.649414 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.649707 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.649717 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.662503 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.677693 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.678034 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.683513 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0"} Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.684130 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.684960 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.697526 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.698077 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.698093 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.699014 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.699141 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.701224 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.704247 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.704448 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.705080 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707021 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707345 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707384 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0fa8611e-686b-432f-8871-e6b5645e3108-hosts-file\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707404 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pkf6\" (UniqueName: \"kubernetes.io/projected/0fa8611e-686b-432f-8871-e6b5645e3108-kube-api-access-2pkf6\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707428 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707484 5032 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707496 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc 
kubenswrapper[5032]: I0122 16:05:16.707506 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707516 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707525 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707534 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707545 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707556 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707566 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707575 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707586 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707597 5032 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707608 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707619 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707630 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc 
kubenswrapper[5032]: I0122 16:05:16.707640 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707650 5032 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707660 5032 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707670 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707679 5032 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707689 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707700 5032 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707710 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707720 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707729 5032 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707738 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707750 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707758 5032 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707768 5032 reconciler_common.go:293] 
"Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707777 5032 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707786 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707796 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707805 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707815 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707827 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707837 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.707847 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708100 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708112 5032 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708123 5032 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708133 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc 
kubenswrapper[5032]: I0122 16:05:16.708142 5032 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708151 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708160 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708168 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708178 5032 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708186 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708198 5032 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708206 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708217 5032 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708225 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708234 5032 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708244 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708254 5032 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708263 5032 
reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708274 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708283 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708292 5032 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708302 5032 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708317 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708326 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708335 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708345 5032 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708354 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708364 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708374 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708382 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: 
I0122 16:05:16.708391 5032 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708400 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708409 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708395 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708430 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708456 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708419 5032 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708512 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708518 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0fa8611e-686b-432f-8871-e6b5645e3108-hosts-file\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708528 5032 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708584 5032 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708575 5032 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708598 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708612 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708625 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708639 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708651 5032 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708663 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708673 5032 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708683 5032 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708692 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708702 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708712 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708722 5032 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708733 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708742 5032 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708752 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708762 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708773 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708783 5032 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708793 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708802 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708810 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708821 5032 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708883 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708893 5032 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708903 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708912 5032 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708923 5032 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708934 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708945 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708955 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708968 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708978 5032 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708987 5032 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.708996 5032 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709007 5032 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709018 5032 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709027 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709037 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" 
DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709047 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709056 5032 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709066 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709076 5032 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709086 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709095 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709106 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709117 5032 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709127 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709138 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709149 5032 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709158 5032 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709171 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" 
DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709181 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709191 5032 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709201 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709212 5032 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709223 5032 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709234 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709244 5032 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709255 5032 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709266 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709277 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709288 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709301 5032 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709311 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 
16:05:16.709653 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.709667 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.713474 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.721118 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.721639 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.724587 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.726940 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.727316 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pkf6\" (UniqueName: \"kubernetes.io/projected/0fa8611e-686b-432f-8871-e6b5645e3108-kube-api-access-2pkf6\") pod \"node-resolver-rbbd6\" (UID: \"0fa8611e-686b-432f-8871-e6b5645e3108\") " pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.735842 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.747157 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.747368 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.750615 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.752127 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.754488 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rbbd6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.760020 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.766242 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.773757 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: W0122 16:05:16.773946 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-7831ce9f02c41fc4324732db9ca66ba0e63a50be8c321da1a695dc4b5e944efb WatchSource:0}: Error finding container 7831ce9f02c41fc4324732db9ca66ba0e63a50be8c321da1a695dc4b5e944efb: Status 404 returned error can't find the container with id 7831ce9f02c41fc4324732db9ca66ba0e63a50be8c321da1a695dc4b5e944efb Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.790478 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.804366 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810190 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810211 5032 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810222 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810234 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810245 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.810254 5032 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.846729 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-zfz9c"] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.847059 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.847445 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-6qsfs"] Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.847970 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.857646 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.857929 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.858113 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.858266 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.858385 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.858496 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.858608 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.861070 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.871391 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.871554 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.880002 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.891321 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.906555 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910472 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cni-binary-copy\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910618 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-etc-kubernetes\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910659 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" 
(UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cnibin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910674 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-k8s-cni-cncf-io\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910701 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-conf-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910716 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-os-release\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910730 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-socket-dir-parent\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910745 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-multus-certs\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910784 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fee2227c-a587-4ec3-909d-bd355aa116d4-rootfs\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910801 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-netns\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910831 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-hostroot\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910849 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bthxt\" (UniqueName: 
\"kubernetes.io/projected/fee2227c-a587-4ec3-909d-bd355aa116d4-kube-api-access-bthxt\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910880 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-system-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910894 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-bin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910909 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-multus\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910924 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fee2227c-a587-4ec3-909d-bd355aa116d4-proxy-tls\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910943 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fee2227c-a587-4ec3-909d-bd355aa116d4-mcd-auth-proxy-config\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910964 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-daemon-config\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910980 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cr55\" (UniqueName: \"kubernetes.io/projected/d111dfc5-e30e-477d-81c1-afcc5bd064ad-kube-api-access-5cr55\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.910996 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.911013 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-kubelet\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.918028 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.932707 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.971360 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:16 crc kubenswrapper[5032]: I0122 16:05:16.987503 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.007941 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019334 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-multus-certs\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019387 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-os-release\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019426 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-socket-dir-parent\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019450 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fee2227c-a587-4ec3-909d-bd355aa116d4-rootfs\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc 
kubenswrapper[5032]: I0122 16:05:17.019475 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-netns\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019520 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-hostroot\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019543 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-bin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019564 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-multus\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019584 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fee2227c-a587-4ec3-909d-bd355aa116d4-proxy-tls\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019608 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fee2227c-a587-4ec3-909d-bd355aa116d4-mcd-auth-proxy-config\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019628 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bthxt\" (UniqueName: \"kubernetes.io/projected/fee2227c-a587-4ec3-909d-bd355aa116d4-kube-api-access-bthxt\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019647 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-system-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019667 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-daemon-config\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019692 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5cr55\" (UniqueName: \"kubernetes.io/projected/d111dfc5-e30e-477d-81c1-afcc5bd064ad-kube-api-access-5cr55\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019715 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019734 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-kubelet\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019755 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-etc-kubernetes\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019795 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cni-binary-copy\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019827 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cnibin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019882 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-k8s-cni-cncf-io\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.019905 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-conf-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020332 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-multus-certs\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020456 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-os-release\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " 
pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020638 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-socket-dir-parent\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020679 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fee2227c-a587-4ec3-909d-bd355aa116d4-rootfs\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020712 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-netns\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020747 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-hostroot\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020783 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-bin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020808 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-conf-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020820 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-cni-multus\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.020986 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021047 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fee2227c-a587-4ec3-909d-bd355aa116d4-mcd-auth-proxy-config\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021136 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-var-lib-kubelet\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021176 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-etc-kubernetes\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021217 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-system-cni-dir\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021724 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cni-binary-copy\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021787 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-cnibin\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.021820 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d111dfc5-e30e-477d-81c1-afcc5bd064ad-host-run-k8s-cni-cncf-io\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.022508 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d111dfc5-e30e-477d-81c1-afcc5bd064ad-multus-daemon-config\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.025358 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fee2227c-a587-4ec3-909d-bd355aa116d4-proxy-tls\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.031734 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.050537 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bthxt\" (UniqueName: \"kubernetes.io/projected/fee2227c-a587-4ec3-909d-bd355aa116d4-kube-api-access-bthxt\") pod \"machine-config-daemon-6qsfs\" (UID: \"fee2227c-a587-4ec3-909d-bd355aa116d4\") " pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.050763 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.053587 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cr55\" (UniqueName: \"kubernetes.io/projected/d111dfc5-e30e-477d-81c1-afcc5bd064ad-kube-api-access-5cr55\") pod \"multus-zfz9c\" (UID: \"d111dfc5-e30e-477d-81c1-afcc5bd064ad\") " pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.075812 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.086446 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.100789 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.116209 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.120659 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.120755 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.120776 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.120892 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:18.120843488 +0000 UTC m=+21.933012519 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.120903 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.120976 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:18.120968701 +0000 UTC m=+21.933137722 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.120901 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.121032 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:18.121024132 +0000 UTC m=+21.933193163 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.126943 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.137395 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.146791 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.154914 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.166466 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-zfz9c" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.167270 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.176401 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:05:17 crc kubenswrapper[5032]: W0122 16:05:17.176977 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd111dfc5_e30e_477d_81c1_afcc5bd064ad.slice/crio-d3b14d4f07f0989f00639ebea536bc944af311304c474429e4dfb5cda43819e9 WatchSource:0}: Error finding container d3b14d4f07f0989f00639ebea536bc944af311304c474429e4dfb5cda43819e9: Status 404 returned error can't find the container with id d3b14d4f07f0989f00639ebea536bc944af311304c474429e4dfb5cda43819e9 Jan 22 16:05:17 crc kubenswrapper[5032]: W0122 16:05:17.190704 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfee2227c_a587_4ec3_909d_bd355aa116d4.slice/crio-1b7a2599e34ed3ddcfeed5d765c2ea7f836ad65ad83edda3147ca603bee1e6f9 WatchSource:0}: Error finding container 1b7a2599e34ed3ddcfeed5d765c2ea7f836ad65ad83edda3147ca603bee1e6f9: Status 404 returned error can't find the container with id 1b7a2599e34ed3ddcfeed5d765c2ea7f836ad65ad83edda3147ca603bee1e6f9 Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.221653 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.221719 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.221893 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.221920 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.221936 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.221990 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.222029 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.222042 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod 
openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.221998 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:18.221979155 +0000 UTC m=+22.034148186 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.222122 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:18.222101038 +0000 UTC m=+22.034270069 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.251701 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5q2w8"] Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.252663 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-5gn8h"] Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.252850 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.254329 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257087 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257345 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257476 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257600 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257619 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257702 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257927 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.257937 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.258268 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.271311 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.296370 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.309418 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.322984 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-binary-copy\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323032 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323058 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323078 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323138 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-system-cni-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323326 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-cnibin\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323468 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323515 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323545 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323624 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323726 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323786 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psnqf\" (UniqueName: 
\"kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323842 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323918 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.323967 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324000 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324056 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324092 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324109 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324128 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc 
kubenswrapper[5032]: I0122 16:05:17.324245 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324325 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324352 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324371 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-os-release\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324391 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324405 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.324422 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22dpc\" (UniqueName: \"kubernetes.io/projected/b1410b11-001c-4b31-ab18-02e37bc11454-kube-api-access-22dpc\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.327748 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.339531 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.361950 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.385697 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.405959 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.418752 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 22:07:53.909999129 +0000 UTC Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.421322 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425485 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425535 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-system-cni-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425620 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-system-cni-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425638 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425671 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-cnibin\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425695 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425724 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425743 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425761 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425777 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425797 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425816 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425848 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet\") pod 
\"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425763 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-cnibin\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425907 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425827 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425909 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425973 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psnqf\" (UniqueName: \"kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425937 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426017 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.425796 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426047 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 
16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426060 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426085 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426124 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426153 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426095 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426176 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426198 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426216 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426231 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426260 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426306 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426329 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-os-release\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426352 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426383 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426410 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426411 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426435 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22dpc\" (UniqueName: \"kubernetes.io/projected/b1410b11-001c-4b31-ab18-02e37bc11454-kube-api-access-22dpc\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " 
pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426503 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426503 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426522 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426534 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-binary-copy\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.426641 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b1410b11-001c-4b31-ab18-02e37bc11454-os-release\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.427025 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.427188 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.427442 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.427515 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 
22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.427974 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.430497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b1410b11-001c-4b31-ab18-02e37bc11454-cni-binary-copy\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.431351 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.437660 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.443282 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psnqf\" (UniqueName: \"kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf\") pod \"ovnkube-node-5q2w8\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.446090 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22dpc\" (UniqueName: \"kubernetes.io/projected/b1410b11-001c-4b31-ab18-02e37bc11454-kube-api-access-22dpc\") pod \"multus-additional-cni-plugins-5gn8h\" (UID: \"b1410b11-001c-4b31-ab18-02e37bc11454\") " pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.453876 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.454006 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.457447 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.470951 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.482627 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.496143 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.514045 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.554551 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.566474 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.574837 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" Jan 22 16:05:17 crc kubenswrapper[5032]: W0122 16:05:17.593798 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1410b11_001c_4b31_ab18_02e37bc11454.slice/crio-1119176183dd9b5850d9ac7698ecb1955fe14a6abdefd4f0a40ff04278891ae2 WatchSource:0}: Error finding container 1119176183dd9b5850d9ac7698ecb1955fe14a6abdefd4f0a40ff04278891ae2: Status 404 returned error can't find the container with id 1119176183dd9b5850d9ac7698ecb1955fe14a6abdefd4f0a40ff04278891ae2 Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.601309 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.632777 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.675815 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.692935 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.693003 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.693019 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"7831ce9f02c41fc4324732db9ca66ba0e63a50be8c321da1a695dc4b5e944efb"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.696399 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.696457 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"addd1e2eafaba1c9aabb8652481ec0f7d8e59540b84f5abc8c0d39ca2c0ae7d2"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.700849 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"fabc7337765c3f612d599754eb42ebdca39b3625135a9fe3c272f31428f3246e"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.701589 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0c5151d4c182634779767ca8d9f23e19444aa40a147e999d69ba553c251c0424"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.704305 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rbbd6" event={"ID":"0fa8611e-686b-432f-8871-e6b5645e3108","Type":"ContainerStarted","Data":"a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.704365 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rbbd6" event={"ID":"0fa8611e-686b-432f-8871-e6b5645e3108","Type":"ContainerStarted","Data":"4554fae38a48523c46eb641e5d153cd725ae03cb76b38cbd4aa7201a5cd369f6"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.711011 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.711044 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.711053 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"1b7a2599e34ed3ddcfeed5d765c2ea7f836ad65ad83edda3147ca603bee1e6f9"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.719262 5032 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerStarted","Data":"c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.719357 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerStarted","Data":"d3b14d4f07f0989f00639ebea536bc944af311304c474429e4dfb5cda43819e9"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.721189 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.726158 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.727749 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.732764 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" exitCode=255 Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.732845 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.732933 5032 scope.go:117] "RemoveContainer" containerID="5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.733346 5032 scope.go:117] "RemoveContainer" containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" Jan 22 16:05:17 crc kubenswrapper[5032]: E0122 16:05:17.733517 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.735755 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerStarted","Data":"1119176183dd9b5850d9ac7698ecb1955fe14a6abdefd4f0a40ff04278891ae2"} Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.754583 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.795000 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.839322 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.877264 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.924591 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.959053 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:17 crc kubenswrapper[5032]: I0122 16:05:17.995923 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:17Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.075071 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.091295 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.118006 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.133473 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.133613 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.133714 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:20.13367952 +0000 UTC m=+23.945848541 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.133778 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.133942 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.133963 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:20.133951986 +0000 UTC m=+23.946121017 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.134038 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.134166 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:20.134142131 +0000 UTC m=+23.946311162 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.159580 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.195702 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.235320 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235622 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235667 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.235673 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: 
\"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235684 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235764 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:20.23573889 +0000 UTC m=+24.047907921 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235812 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235833 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235845 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.235916 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:20.235899034 +0000 UTC m=+24.048068055 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.236722 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.274052 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.317581 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.419755 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 17:53:44.009605002 +0000 UTC Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.444031 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-th95w"] Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.444546 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.447394 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.447431 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.448028 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.448287 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.454873 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.454964 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.455320 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.455386 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.457907 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.458636 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.461565 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.462245 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.463427 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.464039 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.464628 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.466500 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.467194 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.467639 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.469137 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.469713 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.470928 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.471628 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.473672 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.474619 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.475245 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.476444 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.476852 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.478244 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.480687 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.481203 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.482314 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.482743 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" 
path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.483779 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.484301 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.484944 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.486044 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.486536 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.487654 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.488289 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.488622 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a1952c0bb270dbd8a65a39dd970b4383344f89dceeec3ba63bb8e5199cc5372\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:15Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 16:05:10.154586 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:10.156161 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1507857052/tls.crt::/tmp/serving-cert-1507857052/tls.key\\\\\\\"\\\\nI0122 16:05:15.505329 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:15.509498 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:15.509532 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:15.509583 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:15.509595 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:15.519018 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:15.519052 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519060 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:15.519070 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:15.519078 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:15.519084 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:15.519092 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:15.519325 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:15.521601 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 
maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.490577 5032 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.490690 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.492362 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.493661 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.494207 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.495838 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.496504 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.497454 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.498169 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.499208 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.499700 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.501141 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.502132 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.503329 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.504072 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.505235 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.506044 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.507482 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.508100 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.509932 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.510455 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.511703 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.512509 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.513326 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.518462 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.538089 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a49ee60-0dea-4adf-bcf7-d5717c64218e-host\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.538594 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rccfg\" (UniqueName: \"kubernetes.io/projected/4a49ee60-0dea-4adf-bcf7-d5717c64218e-kube-api-access-rccfg\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.538633 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4a49ee60-0dea-4adf-bcf7-d5717c64218e-serviceca\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.555779 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.597989 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.634883 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.639362 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a49ee60-0dea-4adf-bcf7-d5717c64218e-host\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.639430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rccfg\" (UniqueName: \"kubernetes.io/projected/4a49ee60-0dea-4adf-bcf7-d5717c64218e-kube-api-access-rccfg\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.639486 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" 
(UniqueName: \"kubernetes.io/configmap/4a49ee60-0dea-4adf-bcf7-d5717c64218e-serviceca\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.639507 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a49ee60-0dea-4adf-bcf7-d5717c64218e-host\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.641446 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4a49ee60-0dea-4adf-bcf7-d5717c64218e-serviceca\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.690543 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rccfg\" (UniqueName: \"kubernetes.io/projected/4a49ee60-0dea-4adf-bcf7-d5717c64218e-kube-api-access-rccfg\") pod \"node-ca-th95w\" (UID: \"4a49ee60-0dea-4adf-bcf7-d5717c64218e\") " pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.698833 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.733809 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.740791 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.743260 5032 scope.go:117] "RemoveContainer" containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" Jan 22 16:05:18 crc kubenswrapper[5032]: E0122 16:05:18.743561 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.743996 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" 
containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" exitCode=0 Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.744104 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.746760 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41" exitCode=0 Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.746806 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41"} Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.757084 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-th95w" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.781711 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: W0122 16:05:18.789714 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a49ee60_0dea_4adf_bcf7_d5717c64218e.slice/crio-a3ebedf50381412de156a5319d0d6787c3c34e834bc07631638b069b5ee2b4ed WatchSource:0}: Error finding container a3ebedf50381412de156a5319d0d6787c3c34e834bc07631638b069b5ee2b4ed: Status 404 returned error can't find the container with id a3ebedf50381412de156a5319d0d6787c3c34e834bc07631638b069b5ee2b4ed Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.817524 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.864653 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.895964 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: 
I0122 16:05:18.940133 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:18 crc kubenswrapper[5032]: I0122 16:05:18.973531 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:18Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.015703 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.056112 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.097715 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.135834 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.185706 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z 
is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.247015 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.272671 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.294281 5032 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.294807 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.296537 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.296583 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.296598 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.296714 5032 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.369316 5032 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.369622 5032 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.376277 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.381070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.381118 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.381129 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.381148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.381160 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.420224 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 22:37:38.88776318 +0000 UTC Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.427959 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.459123 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.459303 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.461565 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.466840 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.467940 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.467985 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.467995 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.468014 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.468024 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.482908 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.490076 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.490102 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.490113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.490131 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.490142 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.497565 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc 
kubenswrapper[5032]: E0122 16:05:19.504843 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.509270 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.509317 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 
crc kubenswrapper[5032]: I0122 16:05:19.509330 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.509348 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.509360 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.522362 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.525779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.525819 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.525829 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.525846 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.525872 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.537326 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: E0122 16:05:19.537443 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.539125 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.539149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.539158 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.539173 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.539183 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.641536 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.641628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.641653 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.641688 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.641711 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.744532 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.744579 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.744591 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.744608 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.744620 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.752999 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67" exitCode=0 Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.753070 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764316 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764384 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764397 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764407 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764418 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.764428 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.766824 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.767757 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.768490 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-th95w" event={"ID":"4a49ee60-0dea-4adf-bcf7-d5717c64218e","Type":"ContainerStarted","Data":"c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.768528 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-th95w" event={"ID":"4a49ee60-0dea-4adf-bcf7-d5717c64218e","Type":"ContainerStarted","Data":"a3ebedf50381412de156a5319d0d6787c3c34e834bc07631638b069b5ee2b4ed"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.784821 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.806592 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.824362 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.837900 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.847014 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.847075 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.847090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.847111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.847126 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.853471 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.878443 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.896724 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.912536 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 
16:05:19.925962 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.947180 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\
\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.955195 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.955285 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.955311 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.955348 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.955396 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:19Z","lastTransitionTime":"2026-01-22T16:05:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:19 crc kubenswrapper[5032]: I0122 16:05:19.979788 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:19Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.019262 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058445 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058537 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058551 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.058980 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.097782 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.141723 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.159162 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.159301 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.159347 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.159435 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:24.159389887 +0000 UTC m=+27.971558918 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.159466 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.159487 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.159558 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:24.159541171 +0000 UTC m=+27.971710222 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.159579 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:24.159569811 +0000 UTC m=+27.971738852 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.161042 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.161085 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.161098 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.161133 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.161147 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.174060 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.219966 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.257926 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.260409 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.260570 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.260831 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.260907 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.260927 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.260850 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.261003 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.261025 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, 
object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.261005 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:24.260980626 +0000 UTC m=+28.073149667 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.261109 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:24.261090638 +0000 UTC m=+28.073259679 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.264375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.264443 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.264456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.264524 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.264537 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.300430 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.342039 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.367792 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.367848 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.367901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.367932 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.367952 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.380592 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.417372 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.421396 5032 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 05:34:09.792220483 +0000 UTC Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.454658 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.454779 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.454821 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:20 crc kubenswrapper[5032]: E0122 16:05:20.455020 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.462574 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.470265 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.470343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.470358 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.470381 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.470397 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.498367 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.540513 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.574121 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.574198 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.574213 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.574239 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.574253 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.678208 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.678270 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.678282 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.678309 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.678322 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.781512 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9" exitCode=0 Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.781609 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.783470 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.783577 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.783643 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.783679 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.783758 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.808801 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.838911 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.863568 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.889515 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.889571 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.889585 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.889612 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.889675 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:20Z","lastTransitionTime":"2026-01-22T16:05:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.892232 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c9
69a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.907678 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.928596 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.950605 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.970979 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:20 crc kubenswrapper[5032]: I0122 16:05:20.988909 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:20Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.004998 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.005055 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.005070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.005106 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.005120 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.006715 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.027783 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.040391 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.
126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.059736 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\
\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.064754 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.065741 5032 scope.go:117] "RemoveContainer" containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" Jan 22 16:05:21 crc kubenswrapper[5032]: E0122 16:05:21.065973 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.108551 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.108599 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.108613 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.108633 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.108646 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.211824 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.211899 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.212046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.212064 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.212106 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.314972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.315022 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.315033 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.315055 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.315067 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.418569 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.419126 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.419149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.419177 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.419197 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.422219 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 10:37:59.759875155 +0000 UTC Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.453677 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:21 crc kubenswrapper[5032]: E0122 16:05:21.453887 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.524285 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.524337 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.524353 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.524374 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.524386 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.626973 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.627039 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.627059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.627086 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.627102 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.729716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.730091 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.730178 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.730291 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.730452 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.757602 5032 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.791926 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.795705 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f" exitCode=0 Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.795741 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.823393 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.834209 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.834275 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.834294 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.834322 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.834357 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.844149 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.862075 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.881026 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.906957 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-releas
e\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.930451 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.938116 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.938148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.938159 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.938174 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.938188 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:21Z","lastTransitionTime":"2026-01-22T16:05:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.942608 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.963903 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.978085 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:21 crc kubenswrapper[5032]: I0122 16:05:21.994734 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.013152 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.034055 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z 
is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.039784 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.039813 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.039822 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.039840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.039852 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.047235 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.143137 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.143236 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.143247 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.143268 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.143282 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.246896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.246931 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.246940 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.246956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.246965 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.349593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.349633 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.349645 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.349663 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.349675 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.423327 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 22:11:49.748042727 +0000 UTC Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.452516 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.452568 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.452587 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.452612 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.452631 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.454120 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:22 crc kubenswrapper[5032]: E0122 16:05:22.454335 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.454120 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:22 crc kubenswrapper[5032]: E0122 16:05:22.454706 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.519531 5032 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.561765 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.561826 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.561845 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.561944 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.561967 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.665788 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.665850 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.665890 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.665913 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.665927 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.769702 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.769783 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.769807 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.769840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.769932 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.808415 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c" exitCode=0 Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.808488 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.841491 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.867338 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.873470 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.873510 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.873524 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.873548 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.873564 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.884850 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.903205 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.922457 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.940833 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.970838 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.975949 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.976009 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.976024 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.976046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.976060 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:22Z","lastTransitionTime":"2026-01-22T16:05:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:22 crc kubenswrapper[5032]: I0122 16:05:22.983627 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:22Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.002962 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.023084 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.038720 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.061360 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: 
I0122 16:05:23.080531 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.080601 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.080621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.080656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.080677 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.085670 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2
026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.184196 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.184278 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.184297 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.184331 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.184351 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.259037 5032 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.287640 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.287975 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.288003 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.288079 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.288106 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.392610 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.392692 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.392716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.392751 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.392779 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.424112 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 11:02:15.869879489 +0000 UTC Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.456181 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:23 crc kubenswrapper[5032]: E0122 16:05:23.456415 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.496654 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.496721 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.496741 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.496771 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.496799 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.600527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.600601 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.600625 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.600659 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.600688 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.703677 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.703720 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.703739 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.703767 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.703792 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.806723 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.806800 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.806820 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.806849 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.806908 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.818235 5032 generic.go:334] "Generic (PLEG): container finished" podID="b1410b11-001c-4b31-ab18-02e37bc11454" containerID="b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428" exitCode=0 Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.818297 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerDied","Data":"b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.842799 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.869415 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.899650 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.910532 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.910607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.910627 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.910655 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.910673 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:23Z","lastTransitionTime":"2026-01-22T16:05:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.926534 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.943725 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.960343 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.974813 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:23 crc kubenswrapper[5032]: I0122 16:05:23.996943 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:23Z 
is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.012293 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.013992 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.014051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.014068 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.014090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.014106 5032 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.027183 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.041957 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.059938 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.074349 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.117142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.117178 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.117190 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.117207 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.117218 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.206284 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.206449 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.206500 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.206538 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.206509945 +0000 UTC m=+36.018678976 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.206604 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.206630 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.206747 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.20671741 +0000 UTC m=+36.018886451 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.206795 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.206780642 +0000 UTC m=+36.018949883 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.222521 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.222562 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.222572 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.222587 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.222597 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.307789 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.307907 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308091 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308112 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308167 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308191 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308124 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308293 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308318 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.308255338 +0000 UTC m=+36.120424519 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.308365 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.30834302 +0000 UTC m=+36.120512051 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.325131 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.325170 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.325179 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.325198 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.325211 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.424394 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 17:54:00.685222255 +0000 UTC Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.428052 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.428103 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.428115 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.428137 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.428151 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.455954 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.456085 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.456493 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:24 crc kubenswrapper[5032]: E0122 16:05:24.456541 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.530716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.530775 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.530787 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.530807 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.530823 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.633902 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.633948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.633964 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.633983 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.633995 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.736587 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.736648 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.736661 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.736687 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.736702 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.834105 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" event={"ID":"b1410b11-001c-4b31-ab18-02e37bc11454","Type":"ContainerStarted","Data":"a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.838734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.838802 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.838821 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.838848 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.838913 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.844599 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.848484 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.852396 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.875554 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.876613 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.894845 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.912345 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.932213 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.942170 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.942225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.942237 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.942255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.942267 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:24Z","lastTransitionTime":"2026-01-22T16:05:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.959840 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:24 crc kubenswrapper[5032]: I0122 16:05:24.985697 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:24Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.006465 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.024894 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.041463 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.047396 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.047487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.047508 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.047549 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.047570 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.068517 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.085829 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.100836 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.110769 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\
"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.126294 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.141427 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.150707 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.150794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.150880 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.150910 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.150925 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.156500 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.172957 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.207492 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047
037efa4849d8a0cd9a53f51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.221083 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.238017 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.253841 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.253934 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.253948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.253971 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.253985 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.254337 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.268650 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.286182 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.310835 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.332356 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.357193 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.357251 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.357263 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.357287 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.357303 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.425131 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 01:12:13.683459076 +0000 UTC Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.454461 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:25 crc kubenswrapper[5032]: E0122 16:05:25.454651 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.460978 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.461036 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.461049 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.461070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.461087 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.564728 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.564794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.564812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.564836 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.564853 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.668510 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.668582 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.668597 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.668620 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.668638 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.772475 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.772529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.772543 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.772563 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.772576 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.848107 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.848928 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.873331 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.874779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.874818 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.874829 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.874848 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.874928 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.887800 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.905377 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.922961 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.941067 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.956737 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.972509 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.977101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.977142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.977177 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.977194 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.977205 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:25Z","lastTransitionTime":"2026-01-22T16:05:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:25 crc kubenswrapper[5032]: I0122 16:05:25.993690 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:25Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.023417 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.037458 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.053276 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.071929 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.080225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.080263 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.080274 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.080289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.080299 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.086023 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.100236 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.184538 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.184604 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 
16:05:26.184618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.184641 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.184659 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.287287 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.287341 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.287356 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.287375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.287389 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.390071 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.390173 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.390201 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.390235 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.390255 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.426045 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 17:49:03.803865391 +0000 UTC Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.454622 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.454668 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:26 crc kubenswrapper[5032]: E0122 16:05:26.454779 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:26 crc kubenswrapper[5032]: E0122 16:05:26.455001 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.472744 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.489970 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.493381 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.493416 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.493450 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.493470 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.493484 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.513910 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.532185 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.547505 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.600493 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.602439 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.602477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.602494 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.602554 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.602573 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.616095 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.635160 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.650438 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.666282 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.688244 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.705128 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.705975 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.706040 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.706060 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.706091 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.706111 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.729107 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.810024 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.810085 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.810266 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.810296 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.810318 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.858258 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.915550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.915611 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.915627 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.915653 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:26 crc kubenswrapper[5032]: I0122 16:05:26.915674 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:26Z","lastTransitionTime":"2026-01-22T16:05:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.018620 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.018694 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.018706 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.018751 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.018763 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.123661 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.123779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.124020 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.124368 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.124786 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.228753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.228826 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.228845 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.228905 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.228925 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.332305 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.332354 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.332372 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.332396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.332413 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.426742 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 00:32:40.450451209 +0000 UTC Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.435768 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.435817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.435835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.435896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.435917 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.453674 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:27 crc kubenswrapper[5032]: E0122 16:05:27.453839 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.539295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.539366 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.539383 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.539410 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.539432 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.643105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.643179 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.643199 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.643222 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.643243 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.746669 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.746726 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.746743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.746766 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.746791 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.810385 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.850004 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.850059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.850077 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.850107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.850126 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.864849 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/0.log" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.870254 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f" exitCode=1 Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.870319 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.871633 5032 scope.go:117] "RemoveContainer" containerID="6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.896889 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.933091 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.954642 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.954699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.954723 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.954753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.954778 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:27Z","lastTransitionTime":"2026-01-22T16:05:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.959980 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:27 crc kubenswrapper[5032]: I0122 16:05:27.978310 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.006531 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.027634 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.051096 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.060687 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.060760 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.060781 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.060810 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.060828 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.076635 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.109753 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.123955 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.144217 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.160235 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.164979 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.165022 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.165039 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.165058 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.165073 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.177708 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.269220 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.269292 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.269316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.269355 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.269378 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.372990 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.373063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.373081 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.373108 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.373126 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.604096 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 20:35:38.935375333 +0000 UTC Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.604931 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.605000 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:28 crc kubenswrapper[5032]: E0122 16:05:28.605103 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.605699 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:28 crc kubenswrapper[5032]: E0122 16:05:28.605831 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:28 crc kubenswrapper[5032]: E0122 16:05:28.605988 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.612678 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.612735 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.612753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.612782 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.612806 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.716060 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.716149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.716175 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.716208 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.716233 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.825957 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.826046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.826072 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.826107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.826131 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.878388 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/0.log" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.882288 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.883070 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.904476 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.924650 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.930938 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.931003 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.931023 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.931051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.931071 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:28Z","lastTransitionTime":"2026-01-22T16:05:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.947524 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.967274 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:28 crc kubenswrapper[5032]: I0122 16:05:28.990997 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:28Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.016174 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.030547 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\
"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.034104 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.034164 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.034182 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.034206 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.034226 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.058214 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\
\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.109677 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb192
0e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.132991 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.137066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.137149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.137163 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.137193 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.137206 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.149695 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.173497 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.191900 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.241233 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.241281 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.241294 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.241311 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.241322 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.345155 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.345219 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.345238 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.345259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.345273 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.448767 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.448832 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.448846 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.448898 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.448914 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.552313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.552386 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.552404 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.552428 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.552442 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.604636 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 23:15:11.001126816 +0000 UTC Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.609724 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn"] Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.610407 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.613670 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.614937 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.615252 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.615310 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.615367 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgpgq\" (UniqueName: \"kubernetes.io/projected/56903a8f-394c-4fbf-90fc-8c058c31c8d6-kube-api-access-fgpgq\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.615453 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.616493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.616562 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.616577 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.616596 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.616610 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.638388 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 16:05:29.638661 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2
f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.644460 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.644523 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.644542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.644571 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.644593 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.663923 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]
,\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"
finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067
4616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 16:05:29.666532 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.671798 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.671831 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.671844 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.671883 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.671898 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.681972 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 16:05:29.686567 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.691266 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.691330 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.691344 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.691370 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.691386 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.695506 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 16:05:29.706412 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.710997 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.711046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.711060 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.711081 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.711096 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.712920 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.715981 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.716027 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.716055 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.716085 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgpgq\" (UniqueName: \"kubernetes.io/projected/56903a8f-394c-4fbf-90fc-8c058c31c8d6-kube-api-access-fgpgq\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.717010 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.717051 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.723650 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/56903a8f-394c-4fbf-90fc-8c058c31c8d6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.726908 5032 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 
16:05:29.727881 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"r
egistry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478
274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305
be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: E0122 16:05:29.728036 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.731877 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.731909 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.731921 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.731943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.731959 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.736227 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgpgq\" (UniqueName: \"kubernetes.io/projected/56903a8f-394c-4fbf-90fc-8c058c31c8d6-kube-api-access-fgpgq\") pod \"ovnkube-control-plane-749d76644c-2rfvn\" (UID: \"56903a8f-394c-4fbf-90fc-8c058c31c8d6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.743721 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.757587 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.774096 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.797631 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb192
0e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.809374 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.827505 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.835151 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.835275 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.835304 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.835341 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.835367 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.847171 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.864768 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:29Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.938606 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.938692 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.938709 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.938754 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.938766 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:29Z","lastTransitionTime":"2026-01-22T16:05:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:29 crc kubenswrapper[5032]: I0122 16:05:29.939386 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.043008 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.043511 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.043528 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.043549 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.043565 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.146920 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.147003 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.147033 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.147062 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.147084 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.250663 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.250719 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.250742 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.250777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.250797 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.355712 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.355747 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.355756 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.355772 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.355783 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.454078 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.454606 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.454183 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.454886 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.454165 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.455113 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.460416 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.461144 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.461196 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.461239 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.461266 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.564658 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.564736 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.564753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.564777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.564795 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.605169 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 00:29:26.095057398 +0000 UTC Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.668376 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.668452 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.668477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.668510 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.668530 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.760140 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-95g6b"] Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.761409 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.761498 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.771836 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.771905 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.771916 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.771943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.771957 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.788912 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb192
0e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.801734 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.815483 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.828218 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.828465 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.828549 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsj2j\" (UniqueName: \"kubernetes.io/projected/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-kube-api-access-jsj2j\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.850448 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.866236 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.874973 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.875006 5032 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.875017 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.875031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.875041 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.884041 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-
config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.891121 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" event={"ID":"56903a8f-394c-4fbf-90fc-8c058c31c8d6","Type":"ContainerStarted","Data":"c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.891171 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" event={"ID":"56903a8f-394c-4fbf-90fc-8c058c31c8d6","Type":"ContainerStarted","Data":"721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.891183 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" event={"ID":"56903a8f-394c-4fbf-90fc-8c058c31c8d6","Type":"ContainerStarted","Data":"2858ddb64fda8aadefe8943a9d56d696fbf88cf48da9020743168bd4157fa52f"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.892836 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/1.log" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.893462 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/0.log" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.896175 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e" exitCode=1 Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.896210 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.896242 5032 scope.go:117] "RemoveContainer" containerID="6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.896992 5032 scope.go:117] "RemoveContainer" containerID="99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.897174 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 
16:05:30.901642 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.916959 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.930102 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsj2j\" (UniqueName: 
\"kubernetes.io/projected/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-kube-api-access-jsj2j\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.930180 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.930508 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:30 crc kubenswrapper[5032]: E0122 16:05:30.930669 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:31.430633491 +0000 UTC m=+35.242802522 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.944465 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.973466 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsj2j\" (UniqueName: \"kubernetes.io/projected/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-kube-api-access-jsj2j\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.977359 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.977415 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.977428 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.977450 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.977463 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:30Z","lastTransitionTime":"2026-01-22T16:05:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:30 crc kubenswrapper[5032]: I0122 16:05:30.978482 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:30Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.009577 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.029150 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.043332 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.053931 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.066070 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.078383 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.079962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.080011 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.080032 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.080051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.080063 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.097172 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 
16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.106731 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.119514 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.132774 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.147819 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.162705 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.180789 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.182330 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.182371 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.182385 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.182404 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.182420 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.199848 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.214558 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.235498 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.256979 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.277195 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.285478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.285532 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.285545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.285564 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.285577 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.290505 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.389490 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.389559 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.389577 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.389604 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.389624 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.435031 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:31 crc kubenswrapper[5032]: E0122 16:05:31.435253 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:31 crc kubenswrapper[5032]: E0122 16:05:31.435351 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:32.435328776 +0000 UTC m=+36.247497817 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.454565 5032 scope.go:117] "RemoveContainer" containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.492815 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.493352 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.493370 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.493398 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.493421 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.597373 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.597425 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.597442 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.597465 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.597491 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.605759 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 00:43:43.503959139 +0000 UTC Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.701425 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.701493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.701522 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.701553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.701580 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.804564 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.804637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.804661 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.804694 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.804723 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.902401 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.904338 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.904673 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.906395 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.906466 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.906486 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.906538 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.906558 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:31Z","lastTransitionTime":"2026-01-22T16:05:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.912158 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/1.log" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.935890 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.950332 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.964712 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:31 crc kubenswrapper[5032]: I0122 16:05:31.987725 5032 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:31Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.009678 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.010241 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.010298 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.010311 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.010331 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.010343 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.039683 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.055406 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.070205 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.085305 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.100971 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.112676 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.112731 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.112745 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.112766 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.112780 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.116175 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.132525 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.146128 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.178274 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb192
0e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for 
removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f
0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.194441 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:32Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.216496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.216546 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.216569 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.216607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.216634 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.243307 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.243427 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.243507 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.243697 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.243774 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:48.24375254 +0000 UTC m=+52.055921611 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.244130 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.244290 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:05:48.244241352 +0000 UTC m=+52.056410423 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.244403 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:48.244374185 +0000 UTC m=+52.056543326 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.320398 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.320456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.320512 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.320544 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.320569 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.344708 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.344786 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.344998 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345014 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345040 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345054 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345063 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345077 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345149 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:48.345121404 +0000 UTC m=+52.157290475 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.345181 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:48.345168365 +0000 UTC m=+52.157337426 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.424127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.424176 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.424188 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.424205 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.424217 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.446437 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.446666 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.446967 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:34.446939128 +0000 UTC m=+38.259108189 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.453944 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.454121 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.454137 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.454053 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.454504 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.454742 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.454980 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:32 crc kubenswrapper[5032]: E0122 16:05:32.455209 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.527534 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.527583 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.527599 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.527622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.527643 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.606830 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 10:19:12.308839919 +0000 UTC Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.630380 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.630460 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.630478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.630506 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.630530 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.733703 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.733761 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.733772 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.733791 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.733806 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.836621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.836681 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.836698 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.836734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.836783 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.940120 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.940181 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.940198 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.940225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:32 crc kubenswrapper[5032]: I0122 16:05:32.940243 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:32Z","lastTransitionTime":"2026-01-22T16:05:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.044523 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.044595 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.044618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.044652 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.044676 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.147650 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.147722 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.147743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.147777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.147801 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.251333 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.251445 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.251500 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.251529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.251547 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.354799 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.354893 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.354911 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.354935 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.354955 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.458540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.458593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.458613 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.458636 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.458654 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.561600 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.561653 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.561670 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.561738 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.561792 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.607069 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 13:51:24.488470402 +0000 UTC Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.664542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.664614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.664639 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.664669 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.664688 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.767623 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.767685 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.767708 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.767738 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.767760 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.871141 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.871207 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.871231 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.871259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.871282 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.974173 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.974253 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.974272 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.974300 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:33 crc kubenswrapper[5032]: I0122 16:05:33.974318 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:33Z","lastTransitionTime":"2026-01-22T16:05:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.077847 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.077959 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.077980 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.078011 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.078035 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.181493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.181542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.181558 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.181582 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.181600 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.284801 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.284903 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.284929 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.284963 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.284985 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.388407 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.388459 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.388478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.388504 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.388522 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.454216 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.454261 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.454326 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.454270 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.454454 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.454614 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.454753 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.454893 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.472090 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.472270 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:34 crc kubenswrapper[5032]: E0122 16:05:34.472346 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:38.472322746 +0000 UTC m=+42.284491817 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.492551 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.492624 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.492642 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.492667 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.492687 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.595313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.595356 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.595366 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.595382 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.595392 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.607751 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 07:42:18.805855981 +0000 UTC Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.698301 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.698365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.698388 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.698418 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.698441 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.801493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.801558 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.801582 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.801614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.801637 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.905599 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.905661 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.905685 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.905719 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:34 crc kubenswrapper[5032]: I0122 16:05:34.905739 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:34Z","lastTransitionTime":"2026-01-22T16:05:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.008986 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.009066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.009091 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.009127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.009147 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.113229 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.113308 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.113337 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.113365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.113384 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.216705 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.216773 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.216787 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.216809 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.216838 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.320050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.320125 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.320145 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.320171 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.320190 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.423553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.423598 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.423614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.423638 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.423656 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.527696 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.527770 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.527786 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.527837 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.527881 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.608451 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 10:50:14.744614781 +0000 UTC Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.632001 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.632063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.632080 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.632107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.632128 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.735497 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.735559 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.735579 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.735606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.735624 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.839240 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.839280 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.839290 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.839311 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.839322 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.942262 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.942314 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.942360 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.942430 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:35 crc kubenswrapper[5032]: I0122 16:05:35.942458 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:35Z","lastTransitionTime":"2026-01-22T16:05:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.045993 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.046051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.046071 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.046095 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.046116 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.149649 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.149714 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.149729 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.149754 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.149770 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.253765 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.253891 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.253919 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.253958 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.253983 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.356750 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.356826 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.356845 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.356922 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.356947 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.454354 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.454502 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:36 crc kubenswrapper[5032]: E0122 16:05:36.456048 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:36 crc kubenswrapper[5032]: E0122 16:05:36.454567 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.454628 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:36 crc kubenswrapper[5032]: E0122 16:05:36.456161 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.454375 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:36 crc kubenswrapper[5032]: E0122 16:05:36.456251 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.461211 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.461292 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.461334 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.461357 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.461370 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.480494 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.509817 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.532300 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 
16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.558405 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.564911 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.565140 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.565295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.565456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.565813 5032 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.579184 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.602961 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.609437 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 09:29:55.067580049 +0000 UTC Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.621730 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.639709 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.658495 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.670493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.670817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.670827 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.670852 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.670884 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.672962 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.688412 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.703775 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.719303 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.749413 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb192
0e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for 
removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f
0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.763348 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:36Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.773391 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.773542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.773632 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.773741 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.773837 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.877507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.878399 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.878553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.878701 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.878982 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.983082 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.983156 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.983174 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.983202 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:36 crc kubenswrapper[5032]: I0122 16:05:36.983223 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:36Z","lastTransitionTime":"2026-01-22T16:05:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.086575 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.086647 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.086668 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.086699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.086719 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.194030 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.194099 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.194122 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.194163 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.194187 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.299948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.299997 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.300007 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.300026 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.300038 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.403581 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.404114 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.404266 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.404430 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.404606 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.508560 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.508633 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.508657 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.508693 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.508719 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.610256 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 10:27:53.922535285 +0000 UTC Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.612564 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.612638 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.612660 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.612689 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.612709 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.716282 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.716350 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.716369 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.716397 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.716419 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.819892 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.820333 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.820415 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.820502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.820567 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.924134 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.924213 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.924229 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.924259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:37 crc kubenswrapper[5032]: I0122 16:05:37.924277 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:37Z","lastTransitionTime":"2026-01-22T16:05:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.028743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.028812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.028830 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.028888 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.028909 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.132432 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.132502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.132526 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.132558 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.132584 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.235356 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.235414 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.235435 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.235464 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.235487 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.338000 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.338059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.338079 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.338101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.338120 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.440732 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.440784 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.440802 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.440829 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.440847 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.454487 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.454559 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.454665 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.454509 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.454737 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.454852 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.455015 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.455332 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.521040 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.521317 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:38 crc kubenswrapper[5032]: E0122 16:05:38.521386 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:05:46.521363421 +0000 UTC m=+50.333532492 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.544668 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.544737 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.544750 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.544778 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.544794 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.611356 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 04:37:53.861140418 +0000 UTC Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.648936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.649090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.649109 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.649141 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.649161 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.752195 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.752273 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.752291 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.752325 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.752347 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.856247 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.856343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.856374 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.856404 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.856426 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.959939 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.960114 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.960148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.960187 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:38 crc kubenswrapper[5032]: I0122 16:05:38.960210 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:38Z","lastTransitionTime":"2026-01-22T16:05:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.064048 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.064111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.064127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.064151 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.064173 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.167777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.167845 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.167900 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.167936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.167957 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.271313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.271381 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.271400 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.271429 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.271452 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.373945 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.374008 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.374036 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.374064 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.374083 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.478364 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.478426 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.478444 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.478474 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.478493 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.582313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.582387 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.582426 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.582464 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.582488 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.612007 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 06:26:05.477177109 +0000 UTC Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.686421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.686472 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.686490 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.686518 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.686540 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.790048 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.790127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.790154 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.790190 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.790217 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.893631 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.893704 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.893730 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.893765 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.893791 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.932989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.933059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.933085 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.933113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.933130 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: E0122 16:05:39.954827 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:39Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.961838 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.961906 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.961927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.961979 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.961992 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:39 crc kubenswrapper[5032]: E0122 16:05:39.982027 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:39Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.987062 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.987158 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
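The patch rejection recorded just above all comes down to one TLS failure: the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, while the node clock reads 2026-01-22. A minimal Go sketch, run on the node itself, to confirm that by printing the certificate's validity window; the endpoint is taken from the log, and InsecureSkipVerify is deliberate so the handshake survives long enough to read the dates of the already-expired certificate:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Endpoint taken from the failed webhook calls in the log above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook endpoint: %v", err)
	}
	defer conn.Close()

	// Print the validity window of every certificate the webhook presents.
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%q notBefore=%s notAfter=%s\n",
			cert.Subject.String(), cert.NotBefore.UTC(), cert.NotAfter.UTC())
	}
}

Because the webhook intercepts every node-status PATCH, each retry in this window fails with the identical x509 error until that serving certificate (or the CA bundle trusted by the kubelet's client) is refreshed.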
event="NodeHasNoDiskPressure" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.987179 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.987209 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:39 crc kubenswrapper[5032]: I0122 16:05:39.987232 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:39Z","lastTransitionTime":"2026-01-22T16:05:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.009589 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:40Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.014539 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.014608 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.014628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.014653 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.014674 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.034805 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:40Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.039442 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.039492 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.039512 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.039540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.039560 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.060190 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:40Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.060709 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.062887 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
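The shape of the entries above — several "Error updating node status, will retry" records followed by "Unable to update node status" with "update node status exceeds retry count" — is the kubelet giving up after a fixed number of patch attempts per sync. A rough sketch of that control flow; the attempt count of 5 mirrors the upstream kubelet default (nodeStatusUpdateRetry) and is an assumption here, not something read from this log, and patchNodeStatus merely stands in for the real API call:

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry is assumed to match the upstream kubelet default.
const nodeStatusUpdateRetry = 5

// patchNodeStatus stands in for the PATCH that fails in the log: the admission
// webhook rejects it because its serving certificate has expired.
func patchNodeStatus() error {
	return errors.New("failed calling webhook \"node.network-node-identity.openshift.io\": tls: certificate has expired or is not yet valid")
}

// updateNodeStatus retries the patch a bounded number of times, then gives up.
func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}

Once the retries are exhausted the kubelet simply waits for the next status-update interval and starts over, which matches the way the same block of errors repeats throughout this log.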
event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.062953 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.062973 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.063001 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.063023 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.166095 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.166147 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.166193 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.166221 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.166243 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.269654 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.269756 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.269770 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.269793 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.269806 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.373888 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.373957 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.373972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.373998 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.374014 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.454678 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.454915 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.454762 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.454921 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.455152 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.455304 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.455571 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
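Separately from the webhook failures, the NodeNotReady condition and the "Error syncing pod, skipping" entries above share one cause: there is no CNI configuration file in /etc/kubernetes/cni/net.d/, so the container runtime reports NetworkReady=false and sandboxes for the listed pods cannot be created. A small Go sketch, run on the node, to watch for the moment the network provider (presumably OVN-Kubernetes, given the network-node-identity webhook earlier in the log) writes its configuration into place:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Directory named in the NetworkPluginNotReady messages above.
	const dir = "/etc/kubernetes/cni/net.d/"

	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("read %s: %v", dir, err)
	}
	if len(entries) == 0 {
		fmt.Println("no CNI configuration files found; network provider has not started")
		return
	}
	for _, e := range entries {
		fmt.Println(dir + e.Name())
	}
}

An empty (or missing) directory corresponds exactly to the "no CNI configuration file" condition the kubelet keeps reporting; once a *.conflist appears there, the Ready condition can clear.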
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:40 crc kubenswrapper[5032]: E0122 16:05:40.455748 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.476551 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.476585 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.476596 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.476610 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.476621 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.579419 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.579462 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.579477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.579502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.579517 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.612923 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 09:00:57.939871156 +0000 UTC Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.682241 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.682272 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.682284 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.682300 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.682313 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.785703 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.785772 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.785794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.785825 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.785849 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
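The certificate_manager entry above shows the kubernetes.io/kubelet-serving certificate is valid until 2026-02-24 but carried a rotation deadline of 2025-11-10, i.e. rotation was already overdue when this log was captured. That deadline is a jittered point inside the certificate's validity window; the 70-90% fraction in the sketch below reflects the client-go certificate manager's behaviour as best I understand it and, like the placeholder issuance time, should be read as an assumption:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a point at roughly 70-90% of the certificate's
// validity window, jittered so a fleet of kubelets does not rotate at once.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := float64(notAfter.Sub(notBefore))
	return notBefore.Add(time.Duration(total * (0.7 + 0.2*rand.Float64())))
}

func main() {
	// notAfter is taken from the certificate_manager log line above; the log
	// does not print the issuance time, so notBefore here is only a placeholder.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.Add(-365 * 24 * time.Hour)
	fmt.Println("jittered rotation deadline:", rotationDeadline(notBefore, notAfter))
}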
Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.889611 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.889691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.889715 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.889744 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.889763 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.993437 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.993514 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.993535 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.993567 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:40 crc kubenswrapper[5032]: I0122 16:05:40.993589 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:40Z","lastTransitionTime":"2026-01-22T16:05:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.097250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.097349 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.097377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.097409 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.097426 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.200950 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.201014 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.201028 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.201057 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.201073 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.305253 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.305361 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.305385 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.305416 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.305448 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.409289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.409803 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.410037 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.410232 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.410398 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.514487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.514559 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.514578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.514606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.514627 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.614035 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 12:21:34.065935022 +0000 UTC Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.617230 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.617280 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.617297 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.617325 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.617343 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.720645 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.720716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.720735 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.720760 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.720778 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.824587 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.824652 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.824672 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.824697 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.824715 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.928190 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.928246 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.928262 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.928288 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:41 crc kubenswrapper[5032]: I0122 16:05:41.928307 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:41Z","lastTransitionTime":"2026-01-22T16:05:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.035515 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.035595 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.035617 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.035651 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.035678 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.139172 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.139258 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.139275 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.139305 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.139326 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.146823 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.182553 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\
\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323db
b513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a23dd731213ea3957cb327cf3bba01352976047037efa4849d8a0cd9a53f51f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:26Z\\\",\\\"message\\\":\\\"g/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:26.435798 6376 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0122 16:05:26.435948 6376 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0122 16:05:26.436036 6376 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 16:05:26.436105 6376 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 16:05:26.436195 6376 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:26.436333 6376 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:26.436508 6376 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:26.436677 6376 factory.go:656] Stopping watch factory\\\\nI0122 16:05:26.436808 6376 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0122 16:05:26.436931 6376 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:26.437008 6376 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0122 16:05:26.437065 6376 handler.go:208] Removed *v1.Node event handler 2\\\\nI0122 16:05:26.437122 6376 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:26.437177 6376 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:26.437231 6376 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 
16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.201357 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.220492 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.242437 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.243220 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.243304 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.243322 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.243351 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.243372 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.264446 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.289341 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.312712 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.332714 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.365190 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.365257 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.365276 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.365307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.365329 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.379608 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.406233 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.426492 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.446662 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.454674 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.454769 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:42 crc kubenswrapper[5032]: E0122 16:05:42.454830 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.454683 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:42 crc kubenswrapper[5032]: E0122 16:05:42.454997 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:42 crc kubenswrapper[5032]: E0122 16:05:42.455139 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.455230 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:42 crc kubenswrapper[5032]: E0122 16:05:42.455323 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.460643 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.467804 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.467879 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.467895 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.467917 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.467934 5032 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.480564 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.498076 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:42Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.570987 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.571113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.571132 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.571158 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.571176 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.615062 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 01:14:41.631432226 +0000 UTC Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.675439 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.675502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.675520 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.675593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.675623 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.778756 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.778848 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.778904 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.778937 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.778961 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.882464 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.882577 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.882614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.882663 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.882676 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.985766 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.985898 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.985924 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.985952 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:42 crc kubenswrapper[5032]: I0122 16:05:42.985972 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:42Z","lastTransitionTime":"2026-01-22T16:05:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.090079 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.090222 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.090250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.090287 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.090312 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.194947 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.195019 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.195037 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.195066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.195086 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.298986 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.299070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.299090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.299120 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.299141 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.402357 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.402446 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.402475 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.402508 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.402532 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.504618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.504668 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.504683 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.504706 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.504719 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.608366 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.608443 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.608456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.608480 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.608493 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.615802 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 20:08:38.115380636 +0000 UTC Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.711398 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.711455 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.711474 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.711501 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.711520 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.815323 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.815396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.815414 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.815446 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.815467 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.918908 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.918979 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.918998 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.919026 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:43 crc kubenswrapper[5032]: I0122 16:05:43.919047 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:43Z","lastTransitionTime":"2026-01-22T16:05:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.022304 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.022350 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.022362 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.022378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.022387 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.125811 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.125887 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.125901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.125925 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.125938 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.231616 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.231688 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.231706 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.231733 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.231751 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.335395 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.335450 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.335460 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.335481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.335493 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.438656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.438734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.438764 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.438803 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.438825 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.454468 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.454521 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.454552 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.454920 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:44 crc kubenswrapper[5032]: E0122 16:05:44.454913 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:44 crc kubenswrapper[5032]: E0122 16:05:44.455007 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:44 crc kubenswrapper[5032]: E0122 16:05:44.455060 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:44 crc kubenswrapper[5032]: E0122 16:05:44.455479 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.455775 5032 scope.go:117] "RemoveContainer" containerID="99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.478916 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.506567 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.529568 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.542395 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.542475 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.542501 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.542535 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.542567 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.552926 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 
16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.572964 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.600908 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.616313 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 06:52:09.544053232 +0000 UTC Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.625168 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.642972 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.646725 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.646823 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.646844 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.646930 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.647021 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.663644 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.681525 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.701568 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.719103 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.741127 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.751081 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.751142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.751169 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.751197 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.751216 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.778941 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.797710 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:44Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.854888 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.854946 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.854956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.854976 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.854991 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.960023 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.960477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.960491 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.960510 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.960522 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:44Z","lastTransitionTime":"2026-01-22T16:05:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.985778 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/1.log" Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.988248 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff"} Jan 22 16:05:44 crc kubenswrapper[5032]: I0122 16:05:44.989281 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.003330 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.030753 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.049280 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.062525 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.062576 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.062592 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.062616 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.062632 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.064582 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.079276 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.094901 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.115793 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d14
4c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 
16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.130577 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.145985 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.160261 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.165721 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.165794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.165806 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.165824 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.165835 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.177877 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.192405 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 
1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.204363 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.222361 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.237341 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:45Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.269654 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.269761 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.269782 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.269811 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.269903 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.373148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.373220 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.373239 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.373269 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.373288 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.476581 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.476683 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.476733 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.476765 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.476810 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.580815 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.580899 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.580918 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.580940 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.580954 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.616724 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 21:43:25.455012923 +0000 UTC Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.684814 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.684890 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.684907 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.684928 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.684944 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.794732 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.794822 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.794841 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.795363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.795643 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.898624 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.898726 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.898736 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.898758 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.898770 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:45Z","lastTransitionTime":"2026-01-22T16:05:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.995621 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/2.log" Jan 22 16:05:45 crc kubenswrapper[5032]: I0122 16:05:45.997198 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/1.log" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.001408 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" exitCode=1 Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.001474 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.001524 5032 scope.go:117] "RemoveContainer" containerID="99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002274 5032 scope.go:117] "RemoveContainer" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.002442 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002513 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002556 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002573 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.002591 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.022433 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.036393 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.049619 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.067548 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.100083 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.106110 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.106170 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.106191 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.106220 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.106240 5032 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.118186 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.140432 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.163506 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.180597 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.202384 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: 
I0122 16:05:46.208790 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.208919 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.208941 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.208976 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.208997 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.219957 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2
026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.242522 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"con
tainerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe8
5a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/sec
rets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.256572 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.276656 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.295057 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.312065 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.312117 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.312131 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.312152 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.312166 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.415939 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.415994 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.416007 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.416028 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.416044 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.454026 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.454198 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.454382 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.454450 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.454498 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.454575 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.454680 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.454928 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.474280 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.495349 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.513154 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.518607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.518684 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.518705 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.518735 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.518761 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.545413 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99a1678f44de8d18fdea77916906a7a2679bb1920e1f11466128b8b76749801e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"08] Removed *v1.Node event handler 2\\\\nI0122 16:05:29.748847 6512 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 16:05:29.748882 6512 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0122 16:05:29.748976 6512 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.748805 6512 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 16:05:29.750174 6512 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:29.750256 6512 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:29.750289 6512 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:29.750336 6512 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 16:05:29.750370 6512 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:29.750402 6512 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:29.750965 6512 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0122 16:05:29.751010 6512 factory.go:656] Stopping watch factory\\\\nI0122 16:05:29.751026 6512 ovnkube.go:599] Stopped ovnkube\\\\nI0122 
16:05:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.563705 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.582494 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.600998 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.616951 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.617222 5032 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 12:04:28.177066941 +0000 UTC Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.618235 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.618381 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:46 crc kubenswrapper[5032]: E0122 16:05:46.618450 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:02.618432555 +0000 UTC m=+66.430601586 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.622378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.622422 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.622431 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.622448 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.622467 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.645903 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.665643 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.684714 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.698678 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.714517 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.731312 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.731368 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.731377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.731393 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.731408 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.733786 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.750496 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:46Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.835942 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.835991 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.836002 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.836025 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.836037 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.938878 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.938928 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.938941 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.938961 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:46 crc kubenswrapper[5032]: I0122 16:05:46.938974 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:46Z","lastTransitionTime":"2026-01-22T16:05:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.007082 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/2.log" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.011921 5032 scope.go:117] "RemoveContainer" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" Jan 22 16:05:47 crc kubenswrapper[5032]: E0122 16:05:47.012160 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.025614 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.038951 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.041628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.041746 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.042018 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.042111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.042178 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.060629 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.084112 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.101193 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.115952 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.136745 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.145766 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.145813 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.145827 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.145849 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.145885 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.157809 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.173453 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.194929 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.211513 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.234365 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.249074 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.249126 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.249142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.249166 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.249183 5032 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.254201 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.270651 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.285799 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:47Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.351642 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.351738 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.351764 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.351799 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.351837 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.454851 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.454925 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.454939 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.454957 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.454970 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.558167 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.558262 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.558282 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.558312 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.558340 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.617885 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 10:09:17.565350423 +0000 UTC Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.661935 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.662001 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.662018 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.662044 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.662064 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.765214 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.765289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.765314 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.765346 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.765371 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.868781 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.869070 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.869120 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.869148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.869483 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.972452 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.972494 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.972502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.972515 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:47 crc kubenswrapper[5032]: I0122 16:05:47.972526 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:47Z","lastTransitionTime":"2026-01-22T16:05:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.074945 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.074989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.074997 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.075015 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.075024 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.178111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.178190 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.178203 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.178226 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.178239 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.280749 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.280802 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.280812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.280831 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.280887 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.339276 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.339384 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.339481 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:06:20.339451377 +0000 UTC m=+84.151620408 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.339504 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.339551 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.339569 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:20.33955342 +0000 UTC m=+84.151722451 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.339698 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.339734 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:20.339728044 +0000 UTC m=+84.151897075 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.383779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.383842 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.383887 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.383912 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.383923 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.440446 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.440520 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440699 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440731 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440749 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440823 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:20.44080249 +0000 UTC m=+84.252971531 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440699 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440898 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440918 5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.440996 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:20.440981554 +0000 UTC m=+84.253150605 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.454024 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.454150 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.454218 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.454200 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.454158 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.454385 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.454526 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:48 crc kubenswrapper[5032]: E0122 16:05:48.454594 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.486635 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.486691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.486709 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.486733 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.486752 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.589878 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.589913 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.589925 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.589943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.589955 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.618413 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 05:20:30.789154386 +0000 UTC
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.693187 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.693256 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.693272 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.693295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.693312 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.796992 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.797046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.797062 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.797082 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.797098 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.901217 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.901293 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.901312 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.901335 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.901349 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:48Z","lastTransitionTime":"2026-01-22T16:05:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.902199 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.919118 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.922386 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:48Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.938253 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:48Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.957306 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:48Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:48 crc kubenswrapper[5032]: I0122 16:05:48.974015 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:48Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.002533 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerS
tatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T1
6:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts
-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:48Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.007372 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.007433 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.007451 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.007479 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.007499 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.022021 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.046776 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.070956 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.093031 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.112393 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.112437 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.112449 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.112470 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.112487 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.113358 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.132838 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.152025 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.179006 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d14
4c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.200851 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.216005 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.216078 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.216101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.216135 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.216163 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.217093 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:49Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.319514 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.319574 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.319590 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.319617 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.319635 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.422035 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.422097 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.422257 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.422289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.422306 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.525240 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.525284 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.525301 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.525323 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.525340 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.619644 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 01:03:35.317546558 +0000 UTC Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.628255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.628325 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.628342 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.628365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.628381 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.730983 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.731031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.731047 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.731069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.731086 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.833952 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.834010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.834027 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.834050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.834064 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.937683 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.937811 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.937832 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.937896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:49 crc kubenswrapper[5032]: I0122 16:05:49.937918 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:49Z","lastTransitionTime":"2026-01-22T16:05:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.040816 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.040892 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.040909 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.040929 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.040941 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.143627 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.143687 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.143703 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.143732 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.143750 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.202558 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.202621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.202632 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.202651 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.202664 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.225001 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:50Z is after 
2025-08-24T17:21:41Z" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.231734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.231814 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.231835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.231898 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.231920 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.254998 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:50Z is after 
2025-08-24T17:21:41Z" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.260910 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.260972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.260986 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.261015 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.261043 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.280095 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:50Z is after 
2025-08-24T17:21:41Z" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.285825 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.285934 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.285947 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.285966 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.285981 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.302375 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:50Z is after 
2025-08-24T17:21:41Z" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.308365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.308447 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.308461 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.308485 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.308498 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.327904 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:50Z is after 
2025-08-24T17:21:41Z" Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.328146 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.329747 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.329795 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.329809 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.329828 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.329841 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.433096 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.433143 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.433155 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.433175 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.433189 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.454047 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.454086 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.454233 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.454068 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.454240 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.454416 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.454571 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:50 crc kubenswrapper[5032]: E0122 16:05:50.454664 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.537665 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.537757 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.537776 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.537812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.537836 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.620400 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 21:48:02.540279682 +0000 UTC Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.641271 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.641343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.641362 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.641393 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.641414 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.745267 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.745337 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.745353 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.745379 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.745397 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.849223 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.849268 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.849283 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.849302 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.849314 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.952375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.952432 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.952447 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.952465 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:50 crc kubenswrapper[5032]: I0122 16:05:50.952477 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:50Z","lastTransitionTime":"2026-01-22T16:05:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.056314 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.056377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.056392 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.056409 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.056421 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.160059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.160135 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.160158 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.160191 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.160265 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.263527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.263563 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.263576 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.263593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.263605 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.366378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.366429 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.366442 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.366486 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.366500 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.469437 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.469504 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.469522 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.469552 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.469572 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.572791 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.572844 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.572880 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.572902 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.572918 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.621165 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 04:41:05.108678369 +0000 UTC Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.677288 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.677374 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.677397 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.677433 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.677457 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.780801 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.780889 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.780904 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.780929 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.780944 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.884588 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.884674 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.884693 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.884723 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.884743 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.988430 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.988534 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.988566 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.988608 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:51 crc kubenswrapper[5032]: I0122 16:05:51.988634 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:51Z","lastTransitionTime":"2026-01-22T16:05:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.092187 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.092237 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.092248 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.092270 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.092285 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.195216 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.195277 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.195294 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.195319 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.195339 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.299035 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.299107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.299127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.299155 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.299176 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.402978 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.403097 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.403116 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.403146 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.403163 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.454068 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.454167 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.454168 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:52 crc kubenswrapper[5032]: E0122 16:05:52.454286 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.454374 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:52 crc kubenswrapper[5032]: E0122 16:05:52.454459 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:52 crc kubenswrapper[5032]: E0122 16:05:52.455257 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:52 crc kubenswrapper[5032]: E0122 16:05:52.454574 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.507271 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.507345 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.507357 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.507376 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.507391 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.610133 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.610199 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.610211 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.610229 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.610241 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.621579 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 02:43:50.52274455 +0000 UTC Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.713115 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.713159 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.713173 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.713197 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.713211 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.817106 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.817184 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.817201 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.817227 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.817245 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.920781 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.920906 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.920926 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.920953 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:52 crc kubenswrapper[5032]: I0122 16:05:52.920971 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:52Z","lastTransitionTime":"2026-01-22T16:05:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.023694 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.023738 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.023750 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.023767 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.023777 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.127120 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.127195 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.127216 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.127242 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.127259 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.231230 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.231316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.231343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.231376 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.231398 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.334731 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.334852 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.334917 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.334956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.334981 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.447458 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.447540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.447593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.447622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.447644 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.550747 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.550806 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.550820 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.550842 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.550892 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.622767 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 20:25:30.869797265 +0000 UTC Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.653643 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.653710 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.653723 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.653743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.653756 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.757087 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.757149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.757169 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.757196 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.757217 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.860105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.860163 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.860180 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.860206 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.860254 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.962762 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.962831 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.962850 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.962911 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:53 crc kubenswrapper[5032]: I0122 16:05:53.962931 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:53Z","lastTransitionTime":"2026-01-22T16:05:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.066262 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.066316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.066332 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.066359 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.066377 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.169261 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.169326 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.169345 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.169374 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.169392 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.272446 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.272522 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.272544 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.272577 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.272599 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.376438 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.376511 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.376527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.376553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.376571 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.454400 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.454458 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.454455 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:54 crc kubenswrapper[5032]: E0122 16:05:54.454608 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.454629 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:54 crc kubenswrapper[5032]: E0122 16:05:54.454773 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:54 crc kubenswrapper[5032]: E0122 16:05:54.454828 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:54 crc kubenswrapper[5032]: E0122 16:05:54.454939 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.479736 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.479790 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.479808 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.479832 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.479851 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.583743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.583817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.583837 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.583901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.583920 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.623491 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 05:04:36.247102907 +0000 UTC Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.687309 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.687378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.687396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.687422 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.687441 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.791398 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.791467 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.791489 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.791519 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.791544 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.895174 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.895239 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.895257 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.895281 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.895306 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.998924 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.998986 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.999007 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.999034 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:54 crc kubenswrapper[5032]: I0122 16:05:54.999051 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:54Z","lastTransitionTime":"2026-01-22T16:05:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.103974 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.104029 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.104048 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.104069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.104086 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.206995 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.207066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.207092 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.207125 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.207149 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.310674 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.310726 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.310749 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.310779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.310802 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.414519 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.414563 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.414572 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.414586 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.414595 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.529270 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.529320 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.529337 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.529360 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.529377 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.624675 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 20:46:23.921251862 +0000 UTC Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.633191 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.633244 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.633260 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.633284 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.633301 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.736602 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.736691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.736715 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.736748 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.736774 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.840026 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.840105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.840123 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.840144 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.840157 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.942916 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.942975 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.942989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.943010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:55 crc kubenswrapper[5032]: I0122 16:05:55.943025 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:55Z","lastTransitionTime":"2026-01-22T16:05:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.045672 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.045745 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.045762 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.045785 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.045802 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.148377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.148435 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.148448 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.148467 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.148482 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.252234 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.252299 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.252316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.252343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.252361 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.356250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.356325 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.356338 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.356369 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.356384 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.453655 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.453679 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.453653 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.453741 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:56 crc kubenswrapper[5032]: E0122 16:05:56.453854 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:56 crc kubenswrapper[5032]: E0122 16:05:56.453972 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:56 crc kubenswrapper[5032]: E0122 16:05:56.454132 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:56 crc kubenswrapper[5032]: E0122 16:05:56.454227 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.461335 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.461424 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.461440 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.461495 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.461515 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.477504 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.491986 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.507640 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.519756 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volum
eMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.535971 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.550605 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.565160 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.565215 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.565230 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.565247 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.565259 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.571756 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.593321 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.620225 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d14
4c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.625826 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 07:38:17.606158865 +0000 UTC Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.638075 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.657701 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.669063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.669100 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.669115 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.669137 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.669152 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.673389 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.694928 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 
1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.714763 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.740416 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.756212 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:05:56Z is after 2025-08-24T17:21:41Z" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.772258 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.772294 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.772307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.772325 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.772339 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.878758 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.878840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.878869 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.878935 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.878960 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.982069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.982125 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.982134 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.982149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:56 crc kubenswrapper[5032]: I0122 16:05:56.982159 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:56Z","lastTransitionTime":"2026-01-22T16:05:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.085823 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.085927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.085945 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.085975 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.085993 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.189550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.189608 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.189621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.189644 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.189659 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.293530 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.293606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.293618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.293637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.293648 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.397020 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.397088 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.397102 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.397127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.397141 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.500472 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.500534 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.500550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.500574 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.500592 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.602744 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.602786 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.602799 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.602816 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.602828 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.626582 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 09:18:52.173671501 +0000 UTC Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.705609 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.705688 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.705714 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.705750 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.705773 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.809542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.809611 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.809630 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.809656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.809677 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.911980 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.912015 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.912026 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.912040 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:57 crc kubenswrapper[5032]: I0122 16:05:57.912051 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:57Z","lastTransitionTime":"2026-01-22T16:05:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.015705 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.015781 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.015799 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.015824 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.015842 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.118690 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.118740 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.118756 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.118780 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.118796 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.222055 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.222181 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.222284 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.222349 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.222376 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.325448 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.325560 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.325944 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.326534 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.326644 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.430535 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.430614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.430628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.430706 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.430723 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.454211 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:05:58 crc kubenswrapper[5032]: E0122 16:05:58.454381 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.454919 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:05:58 crc kubenswrapper[5032]: E0122 16:05:58.454985 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.455097 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:05:58 crc kubenswrapper[5032]: E0122 16:05:58.455142 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.455191 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:05:58 crc kubenswrapper[5032]: E0122 16:05:58.455233 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.540618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.540673 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.540691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.540719 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.540740 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.627568 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 19:31:27.167549153 +0000 UTC Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.643778 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.643829 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.643977 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.644025 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.644043 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.747497 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.747553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.747569 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.747594 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.747611 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.850353 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.850397 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.850408 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.850426 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.850438 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.952814 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.952852 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.952904 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.952934 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:58 crc kubenswrapper[5032]: I0122 16:05:58.952950 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:58Z","lastTransitionTime":"2026-01-22T16:05:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.055084 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.055156 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.055173 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.055196 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.055214 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.158825 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.158938 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.158962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.158994 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.159019 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.262972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.263038 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.263049 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.263074 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.263087 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.365471 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.365527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.365565 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.365592 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.365612 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.469749 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.469812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.469823 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.469845 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.469886 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.572785 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.572840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.572851 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.572916 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.572932 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.628031 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 06:59:35.974924555 +0000 UTC Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.675837 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.675970 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.675997 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.676032 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.676055 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.778734 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.778783 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.778794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.778810 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.778821 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.882032 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.882110 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.882141 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.882164 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.882179 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.984637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.984703 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.984718 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.984948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:05:59 crc kubenswrapper[5032]: I0122 16:05:59.984973 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:05:59Z","lastTransitionTime":"2026-01-22T16:05:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.088154 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.088223 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.088238 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.088259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.088274 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.191795 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.191899 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.191920 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.191949 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.191968 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.294254 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.294316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.294327 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.294349 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.294362 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.397896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.397968 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.397981 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.398021 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.398033 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.453981 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.454059 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.454016 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.454269 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.454286 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.454395 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.454548 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.454664 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.501250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.501307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.501320 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.501350 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.501367 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.605089 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.605140 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.605155 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.605172 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.605184 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.629036 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 11:32:41.923720606 +0000 UTC Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.707992 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.708045 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.708059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.708081 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.708095 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.726521 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.726562 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.726571 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.726588 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.726599 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.748163 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:00Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.752647 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.752699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.752715 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.752739 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.752753 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.767223 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:00Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.771610 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.771652 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.771671 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.771691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.771705 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.785865 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:00Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.790216 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.790253 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.790261 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.790279 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.790291 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.806027 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:00Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.810669 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.810699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.810709 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.810726 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.810737 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.832065 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:00Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:00 crc kubenswrapper[5032]: E0122 16:06:00.832223 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.834421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.834463 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.834474 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.834490 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.834500 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.937050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.937104 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.937122 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.937148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:00 crc kubenswrapper[5032]: I0122 16:06:00.937162 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:00Z","lastTransitionTime":"2026-01-22T16:06:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.040310 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.040362 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.040373 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.040396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.040407 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.143324 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.143375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.143392 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.143411 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.143426 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.247497 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.247559 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.247578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.247621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.247640 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.350412 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.350458 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.350471 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.350496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.350509 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.452848 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.452931 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.452944 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.452963 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.452975 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.455131 5032 scope.go:117] "RemoveContainer" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" Jan 22 16:06:01 crc kubenswrapper[5032]: E0122 16:06:01.455576 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.555935 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.556031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.556050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.556110 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.556132 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.630240 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 23:55:45.783966313 +0000 UTC Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.659776 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.659854 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.659918 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.660251 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.660271 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.763660 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.764113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.764232 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.764330 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.764425 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.867663 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.867811 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.867833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.867897 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.867916 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.971156 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.971642 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.971773 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.971983 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:01 crc kubenswrapper[5032]: I0122 16:06:01.972128 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:01Z","lastTransitionTime":"2026-01-22T16:06:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.074684 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.075081 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.075182 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.075290 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.075402 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.178185 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.178234 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.178247 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.178269 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.178284 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.280706 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.280781 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.280795 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.280814 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.280826 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.384553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.384607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.384626 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.384657 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.384674 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.454842 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.454948 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.455102 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.455165 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.455364 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.455458 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.455607 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.457079 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.487968 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.488005 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.488020 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.488038 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.488050 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.590387 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.590443 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.590456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.590478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.590492 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.630426 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 13:02:53.145301626 +0000 UTC Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.693354 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.693429 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.693449 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.693481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.693501 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.704649 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.704783 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:06:02 crc kubenswrapper[5032]: E0122 16:06:02.704849 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:06:34.704832244 +0000 UTC m=+98.517001275 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.796167 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.796224 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.796235 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.796259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.796271 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.900026 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.900184 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.900207 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.900236 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:02 crc kubenswrapper[5032]: I0122 16:06:02.900260 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:02Z","lastTransitionTime":"2026-01-22T16:06:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.003011 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.003044 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.003054 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.003071 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.003085 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.069041 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/0.log" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.069105 5032 generic.go:334] "Generic (PLEG): container finished" podID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" containerID="c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136" exitCode=1 Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.069232 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerDied","Data":"c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.069770 5032 scope.go:117] "RemoveContainer" containerID="c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.088287 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.105785 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.107286 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.107418 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.107482 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.107557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.107620 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.132908 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.147138 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.164840 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.183590 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.203257 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.213144 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.213438 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.213640 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.213717 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.214189 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.221388 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.234963 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.254643 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.272127 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.291286 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.313048 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.317146 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.317187 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.317197 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.317215 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.317226 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.328165 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.341611 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.357408 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:03Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.419698 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.419752 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.419771 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.419797 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.419815 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.522505 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.522566 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.522583 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.522610 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.522628 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.625905 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.625972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.625994 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.626022 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.626040 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.632060 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 19:06:51.233838766 +0000 UTC Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.729202 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.729255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.729268 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.729286 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.729299 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.831794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.831902 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.831927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.831954 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.831975 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.934423 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.934483 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.934496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.934520 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:03 crc kubenswrapper[5032]: I0122 16:06:03.934534 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:03Z","lastTransitionTime":"2026-01-22T16:06:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.037604 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.037679 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.037700 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.037731 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.037750 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.076316 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/0.log" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.076409 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerStarted","Data":"38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.106617 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d14
4c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.122451 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.139632 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.141343 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.141403 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.141423 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.141453 5032 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.141473 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.159820 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.179847 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.194950 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.215610 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.231251 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.244197 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.244331 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.244391 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.244454 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.244518 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.281851 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.312076 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.325834 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.339782 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.348243 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.348366 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc 
kubenswrapper[5032]: I0122 16:06:04.348429 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.348503 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.348582 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.351262 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.364017 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.380353 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.397433 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:04Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.469469 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.469572 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.469646 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.469650 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:04 crc kubenswrapper[5032]: E0122 16:06:04.469831 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:04 crc kubenswrapper[5032]: E0122 16:06:04.469982 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:04 crc kubenswrapper[5032]: E0122 16:06:04.470187 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:04 crc kubenswrapper[5032]: E0122 16:06:04.470345 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.471572 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.471619 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.471637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.471666 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.471684 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.574241 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.574313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.574340 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.574374 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.574398 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.633082 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 13:49:57.254350991 +0000 UTC Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.677732 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.677790 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.677808 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.677837 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.677860 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.781288 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.781380 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.781399 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.781428 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.781454 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.885527 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.885586 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.885600 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.885621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.885634 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.989320 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.989399 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.989421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.989455 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:04 crc kubenswrapper[5032]: I0122 16:06:04.989478 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:04Z","lastTransitionTime":"2026-01-22T16:06:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.093644 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.093699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.093719 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.093744 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.093762 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.196447 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.196496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.196506 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.196522 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.196532 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.299253 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.299318 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.299332 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.299355 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.299368 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.402660 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.402719 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.402730 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.402751 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.402763 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.505927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.505979 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.505989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.506006 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.506018 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.609540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.609600 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.609622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.609649 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.609667 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.634148 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 17:04:00.776141653 +0000 UTC Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.712484 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.712531 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.712540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.712557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.712568 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.815169 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.815232 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.815255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.815286 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.815301 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.917205 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.917275 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.917295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.917323 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:05 crc kubenswrapper[5032]: I0122 16:06:05.917345 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:05Z","lastTransitionTime":"2026-01-22T16:06:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.020545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.020612 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.020632 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.020659 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.020685 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.130634 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.130733 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.130753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.130783 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.130801 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.234552 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.234609 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.234622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.234645 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.234660 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.338406 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.338477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.338492 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.338513 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.338526 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.443019 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.443093 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.443118 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.443149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.443168 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.455144 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:06 crc kubenswrapper[5032]: E0122 16:06:06.455584 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.456172 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.456202 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:06 crc kubenswrapper[5032]: E0122 16:06:06.456323 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.456368 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:06 crc kubenswrapper[5032]: E0122 16:06:06.456507 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:06 crc kubenswrapper[5032]: E0122 16:06:06.456602 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.480259 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.497708 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.515090 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.527521 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.541968 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.547092 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.547152 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.547180 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.547213 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.547238 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.560695 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.578475 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.599234 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.613921 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.632395 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.634657 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 15:23:18.358782937 +0000 UTC Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.646943 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.650030 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.650093 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.650114 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.650144 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.650163 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.665300 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.683572 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.703984 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.722071 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 
2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.737356 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:06Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.753548 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.753598 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.753621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.753647 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.753665 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.857155 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.857246 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.857270 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.857302 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.857320 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.959962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.960061 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.960093 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.960130 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:06 crc kubenswrapper[5032]: I0122 16:06:06.960150 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:06Z","lastTransitionTime":"2026-01-22T16:06:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.063518 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.063570 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.063581 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.063601 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.063614 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.166890 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.166931 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.166942 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.166960 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.166971 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.269770 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.269804 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.269812 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.269827 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.269837 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.372776 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.372817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.372827 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.372849 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.372878 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.475880 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.475937 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.475953 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.475975 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.475989 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.579024 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.579104 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.579118 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.579142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.579156 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.635135 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 00:20:56.436679333 +0000 UTC Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.682383 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.682443 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.682460 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.682490 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.682512 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.785626 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.785687 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.785707 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.785733 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.785748 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.888045 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.888102 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.888114 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.888135 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.888148 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.990835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.990950 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.990969 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.990998 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:07 crc kubenswrapper[5032]: I0122 16:06:07.991020 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:07Z","lastTransitionTime":"2026-01-22T16:06:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.093359 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.093417 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.093428 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.093447 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.093459 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.196596 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.196676 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.196688 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.196709 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.196722 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.299852 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.299936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.299948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.299966 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.299982 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.402936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.403005 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.403028 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.403059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.403078 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.454633 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.454710 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.454768 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.454878 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:08 crc kubenswrapper[5032]: E0122 16:06:08.455090 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:08 crc kubenswrapper[5032]: E0122 16:06:08.455207 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:08 crc kubenswrapper[5032]: E0122 16:06:08.455341 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:08 crc kubenswrapper[5032]: E0122 16:06:08.455449 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.505249 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.505297 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.505309 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.505330 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.505347 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.608521 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.608566 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.608578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.608595 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.608607 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.636345 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 03:40:49.307545188 +0000 UTC Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.711449 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.711507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.711524 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.711553 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.711571 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.814495 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.814557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.814569 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.814590 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.814603 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.917904 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.917962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.917974 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.917995 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:08 crc kubenswrapper[5032]: I0122 16:06:08.918010 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:08Z","lastTransitionTime":"2026-01-22T16:06:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.021372 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.021421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.021432 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.021451 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.021462 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.123943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.124010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.124029 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.124056 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.124077 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.227698 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.227790 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.227809 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.227835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.227882 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.331769 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.331829 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.331840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.331879 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.331892 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.433958 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.434020 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.434037 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.434058 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.434074 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.536709 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.536777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.536797 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.536828 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.536852 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.637088 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 17:00:41.432605724 +0000 UTC Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.639333 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.639396 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.639414 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.639440 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.639460 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.747516 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.747605 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.747626 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.747656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.747686 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.851755 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.851833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.851854 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.851908 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.851930 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.955438 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.955496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.955511 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.955533 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:09 crc kubenswrapper[5032]: I0122 16:06:09.955547 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:09Z","lastTransitionTime":"2026-01-22T16:06:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.060666 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.060741 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.060760 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.060789 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.060810 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.164480 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.164551 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.164578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.164609 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.164635 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.268299 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.268372 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.268392 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.268420 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.268441 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.372716 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.372793 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.372810 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.372842 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.372897 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.454393 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.454393 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.454407 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.454427 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.454690 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.454898 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.455068 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.455178 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.475704 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.475766 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.475784 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.475809 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.475824 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.579758 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.579843 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.579897 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.579929 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.579948 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.638191 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 20:34:38.457427145 +0000 UTC Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.683446 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.683523 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.683542 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.683573 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.683595 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.786637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.786705 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.786722 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.786748 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.786768 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.864335 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.864451 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.864462 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.864503 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.864516 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.883686 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:10Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.889725 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.889779 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.889793 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.889815 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.889827 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.911245 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:10Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.918268 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.918403 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.918422 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.918455 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.918505 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.942300 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:10Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.948453 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.948536 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.948552 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.948596 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.948611 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:10 crc kubenswrapper[5032]: E0122 16:06:10.971615 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:10Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.977889 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.978057 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.978080 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.978117 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:10 crc kubenswrapper[5032]: I0122 16:06:10.978139 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:10Z","lastTransitionTime":"2026-01-22T16:06:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: E0122 16:06:11.001238 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:10Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:11 crc kubenswrapper[5032]: E0122 16:06:11.001470 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.004993 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.005043 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.005066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.005096 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.005118 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.107919 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.108006 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.108024 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.108051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.108074 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.211006 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.211098 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.211126 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.211162 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.211192 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.323282 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.323361 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.323381 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.323413 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.323434 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.427631 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.427731 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.427759 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.427793 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.427814 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.531417 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.531540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.531561 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.531594 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.531669 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.634580 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.634660 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.634684 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.634715 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.634737 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.638895 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 04:47:27.497536479 +0000 UTC Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.738736 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.738822 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.738841 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.738930 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.739001 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.842965 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.843111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.843132 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.843165 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.843190 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.947123 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.947194 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.947215 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.947242 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:11 crc kubenswrapper[5032]: I0122 16:06:11.947262 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:11Z","lastTransitionTime":"2026-01-22T16:06:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.051520 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.051590 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.051610 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.051637 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.051657 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.156073 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.156165 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.156194 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.156294 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.156328 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.259837 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.259941 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.259959 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.259984 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.260001 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.363523 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.363588 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.363606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.363631 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.363652 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.454259 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.454336 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.454336 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.454486 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:12 crc kubenswrapper[5032]: E0122 16:06:12.454478 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:12 crc kubenswrapper[5032]: E0122 16:06:12.454610 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:12 crc kubenswrapper[5032]: E0122 16:06:12.454733 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:12 crc kubenswrapper[5032]: E0122 16:06:12.454672 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.466280 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.466408 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.466444 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.466478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.466504 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.571453 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.571519 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.571536 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.571564 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.571658 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.640061 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 23:09:49.106003016 +0000 UTC Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.675105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.675194 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.675213 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.675271 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.675292 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.779227 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.779297 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.779318 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.779349 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.779373 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.883015 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.883159 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.883215 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.883245 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.883296 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.986944 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.987123 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.987148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.987211 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:12 crc kubenswrapper[5032]: I0122 16:06:12.987230 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:12Z","lastTransitionTime":"2026-01-22T16:06:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.090946 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.091118 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.091137 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.091199 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.091222 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.195642 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.195710 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.195728 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.195755 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.195773 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.299639 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.299764 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.299932 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.299968 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.300045 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.403481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.403556 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.403575 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.403606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.403636 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.455077 5032 scope.go:117] "RemoveContainer" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.507363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.507430 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.507453 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.507483 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.507503 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.610492 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.611375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.611397 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.611445 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.611464 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.640604 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 15:03:56.631207509 +0000 UTC Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.715546 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.715647 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.715699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.715728 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.715784 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.818943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.819051 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.819071 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.819100 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.819125 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.922935 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.923050 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.923069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.923099 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:13 crc kubenswrapper[5032]: I0122 16:06:13.923119 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:13Z","lastTransitionTime":"2026-01-22T16:06:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.026799 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.026893 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.026962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.026990 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.027011 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.118358 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/2.log" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.122755 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.123709 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.135696 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.135743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.135761 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.135780 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.135795 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.146017 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.167035 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.182827 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.208050 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.224598 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.239775 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.239839 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.239873 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.239901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.239917 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.244769 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.266187 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.287187 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.306815 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.329783 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.343150 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.343194 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.343205 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.343223 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.343237 5032 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.352473 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.387254 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323
df0ec64ce12fa1e4aadd0da2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.404631 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.427843 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.446699 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.446776 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.446796 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.446828 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.446850 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.452479 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.454713 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.454796 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.454998 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:14 crc kubenswrapper[5032]: E0122 16:06:14.454994 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.454729 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:14 crc kubenswrapper[5032]: E0122 16:06:14.455158 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:14 crc kubenswrapper[5032]: E0122 16:06:14.455270 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:14 crc kubenswrapper[5032]: E0122 16:06:14.455369 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.472326 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:14Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.549853 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.549918 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.549930 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.549951 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.549965 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.641248 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 01:35:39.221106828 +0000 UTC Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.653229 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.653307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.653326 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.653357 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.653383 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.756830 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.756964 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.756984 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.757013 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.757034 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.860503 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.860565 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.860583 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.860614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.860635 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.964557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.964595 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.964611 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.964636 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:14 crc kubenswrapper[5032]: I0122 16:06:14.964650 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:14Z","lastTransitionTime":"2026-01-22T16:06:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.068183 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.068241 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.068259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.068282 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.068295 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.171225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.171276 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.171287 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.171309 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.171323 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.273846 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.273905 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.273916 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.273933 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.273944 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.377480 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.377548 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.377566 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.377593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.377611 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.480938 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.480993 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.481003 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.481024 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.481036 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.584941 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.585010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.585031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.585060 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.585080 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.642185 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 23:19:07.855855462 +0000 UTC Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.688552 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.688628 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.688654 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.688691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.688715 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.791483 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.791557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.791578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.791609 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.791634 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.895362 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.895417 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.895430 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.895452 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.895468 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.999121 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.999196 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.999221 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.999255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:15 crc kubenswrapper[5032]: I0122 16:06:15.999283 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:15Z","lastTransitionTime":"2026-01-22T16:06:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.102013 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.102085 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.102101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.102127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.102146 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.133844 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/3.log" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.134776 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/2.log" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.138261 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" exitCode=1 Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.138319 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.138365 5032 scope.go:117] "RemoveContainer" containerID="175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.139712 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:06:16 crc kubenswrapper[5032]: E0122 16:06:16.140043 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.166943 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.190412 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207164 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207211 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207224 5032 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207268 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.207570 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.231132 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 
1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.252587 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.278318 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.293764 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.313425 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.313477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.313487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.313507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.313519 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.316488 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.337192 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.360767 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.374880 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.392786 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.413997 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.416259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.416310 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.416323 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.416345 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.416361 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.438137 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.454539 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.454599 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.454655 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.454765 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:16 crc kubenswrapper[5032]: E0122 16:06:16.454837 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:16 crc kubenswrapper[5032]: E0122 16:06:16.455068 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:16 crc kubenswrapper[5032]: E0122 16:06:16.455716 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:16 crc kubenswrapper[5032]: E0122 16:06:16.456107 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.472525 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:15Z\\\",\\\"message\\\":\\\", Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0122 16:06:15.613531 7117 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:06:15.613643 7117 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0122 16:06:15.613693 7117 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication/oauth-openshift]} name:Service_openshift-authentication/oauth-openshift_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.222:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c0c2f725-e461-454e-a88c-c8350d62e1ef}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0122 16:06:15.613753 7117 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc 
anno\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:06:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.489161 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.506940 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.519892 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.519960 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.519981 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.520013 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.520035 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.524474 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.541897 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.562777 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.579253 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.602935 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.619536 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.623543 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.623712 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.623824 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.623985 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.624131 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.639135 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.642504 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 08:18:19.966335906 +0000 UTC Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.658336 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.682351 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.704313 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.722904 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.727989 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.728030 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.728039 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.728059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.728071 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.748444 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.772748 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.808191 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323
df0ec64ce12fa1e4aadd0da2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:15Z\\\",\\\"message\\\":\\\", Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0122 16:06:15.613531 7117 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:06:15.613643 7117 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0122 16:06:15.613693 7117 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication/oauth-openshift]} name:Service_openshift-authentication/oauth-openshift_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.222:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c0c2f725-e461-454e-a88c-c8350d62e1ef}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0122 16:06:15.613753 7117 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add 
event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc anno\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:06:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.825053 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:16Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.831808 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.831951 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.831981 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.832016 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.832041 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.936012 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.936069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.936088 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.936113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:16 crc kubenswrapper[5032]: I0122 16:06:16.936131 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:16Z","lastTransitionTime":"2026-01-22T16:06:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.039680 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.039752 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.039776 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.039808 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.039831 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.142500 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.142946 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.143113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.143296 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.143493 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.146205 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/3.log" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.247973 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.248037 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.248053 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.248077 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.248097 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.351453 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.351550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.351575 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.351607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.351629 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.455020 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.455543 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.455693 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.461075 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.461128 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.564720 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.564775 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.564792 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.564817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.564836 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.643798 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 11:57:18.703379048 +0000 UTC Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.668593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.668657 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.668675 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.668701 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.668720 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.772100 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.772545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.772704 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.772962 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.773130 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.876193 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.876226 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.876236 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.876250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.876259 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.979721 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.979782 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.979802 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.979833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:17 crc kubenswrapper[5032]: I0122 16:06:17.979852 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:17Z","lastTransitionTime":"2026-01-22T16:06:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.083538 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.083607 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.083624 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.083649 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.083668 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.187161 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.187228 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.187250 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.187281 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.187301 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.290844 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.290950 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.290971 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.291001 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.291022 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.394130 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.394207 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.394230 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.394261 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.394283 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.454078 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.454128 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.454173 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:18 crc kubenswrapper[5032]: E0122 16:06:18.454272 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:18 crc kubenswrapper[5032]: E0122 16:06:18.454578 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:18 crc kubenswrapper[5032]: E0122 16:06:18.454910 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.455372 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:18 crc kubenswrapper[5032]: E0122 16:06:18.455666 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.496953 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.497030 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.497056 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.497092 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.497117 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.600163 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.600466 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.600772 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.601224 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.601387 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.644944 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 14:52:57.488144924 +0000 UTC Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.704126 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.704200 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.704218 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.704243 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.704262 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.807431 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.807815 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.807936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.808090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.808187 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.911969 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.912046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.912071 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.912104 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:18 crc kubenswrapper[5032]: I0122 16:06:18.912133 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:18Z","lastTransitionTime":"2026-01-22T16:06:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.016146 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.016236 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.016267 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.016307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.016336 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.120031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.120144 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.120178 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.120208 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.120230 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.223046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.223096 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.223107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.223127 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.223142 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.325908 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.325990 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.326010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.326048 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.326081 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.428891 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.428969 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.428990 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.429024 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.429044 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.532557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.533045 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.533202 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.533375 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.533518 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.637675 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.638232 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.638273 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.638307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.638332 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.645952 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 19:41:59.983008457 +0000 UTC Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.743569 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.743643 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.743666 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.743743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.743765 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.848040 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.848111 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.848136 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.848174 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.848199 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.952017 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.952083 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.952112 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.952139 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:19 crc kubenswrapper[5032]: I0122 16:06:19.952156 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:19Z","lastTransitionTime":"2026-01-22T16:06:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.055593 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.055669 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.055690 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.055717 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.055736 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.158897 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.158969 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.158982 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.159128 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.159151 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.262788 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.262853 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.262899 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.262927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.262942 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.365924 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.366007 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.366076 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.366103 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.366120 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.422815 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.423023 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.422988979 +0000 UTC m=+148.235158020 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.423086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.423215 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.423365 5032 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.423398 5032 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.423515 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.423476561 +0000 UTC m=+148.235645642 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.423572 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.423549243 +0000 UTC m=+148.235718524 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.454621 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.454662 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.454727 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.454779 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.454797 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.454931 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.455134 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.455207 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.468639 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.468678 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.468690 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.468705 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.468717 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.524462 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.524522 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524678 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524707 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524679 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524744 5032 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524721 5032 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524758 
5032 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524846 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.524815234 +0000 UTC m=+148.336984285 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:06:20 crc kubenswrapper[5032]: E0122 16:06:20.524917 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.524902406 +0000 UTC m=+148.337071457 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.572047 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.572124 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.572136 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.572153 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.572167 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.646821 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 05:58:38.884450562 +0000 UTC Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.675606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.675656 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.675668 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.675686 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.675698 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.778421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.778478 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.778495 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.778525 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.778543 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.882006 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.882086 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.882113 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.882148 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.882170 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.985234 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.985316 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.985335 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.985363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:20 crc kubenswrapper[5032]: I0122 16:06:20.985385 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:20Z","lastTransitionTime":"2026-01-22T16:06:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.088416 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.088481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.088524 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.088550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.088565 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.108174 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.108205 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.108213 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.108226 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.108237 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.133289 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.137791 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.137844 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.137872 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.137896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.137911 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.153778 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.158975 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.159017 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.159031 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.159053 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.159066 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.174786 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.179269 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.179312 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.179329 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.179351 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.179370 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.194385 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.199999 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.200049 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.200066 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.200086 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.200100 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.218935 5032 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a0e083b8-b5a8-42a6-8604-3847e4e28cf7\\\",\\\"systemUUID\\\":\\\"2f63932e-1307-43a5-8503-03171219a9a3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:21Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:21 crc kubenswrapper[5032]: E0122 16:06:21.219143 5032 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.221529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.221579 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.221594 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.221613 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.221627 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.325180 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.325237 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.325252 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.325277 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.325303 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.429743 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.429833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.429906 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.429934 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.429953 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.534352 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.534434 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.534472 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.534509 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.534532 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.637529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.637603 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.637621 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.637654 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.637680 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.647707 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 02:55:11.51395687 +0000 UTC Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.740622 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.740677 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.740691 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.740710 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.740722 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.844013 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.844075 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.844086 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.844107 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.844121 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.947406 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.947449 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.947461 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.947481 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:21 crc kubenswrapper[5032]: I0122 16:06:21.947494 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:21Z","lastTransitionTime":"2026-01-22T16:06:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.050038 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.050101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.050119 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.050147 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.050167 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.153385 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.153431 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.153444 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.153464 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.153474 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.257160 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.257349 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.257421 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.257457 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.257481 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.362674 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.362735 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.362748 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.362773 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.362796 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.454267 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.454313 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.454469 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:22 crc kubenswrapper[5032]: E0122 16:06:22.454689 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.454724 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:22 crc kubenswrapper[5032]: E0122 16:06:22.454962 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:22 crc kubenswrapper[5032]: E0122 16:06:22.455056 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:22 crc kubenswrapper[5032]: E0122 16:06:22.455190 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.465805 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.465901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.465922 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.465969 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.465990 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.569877 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.569923 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.569936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.569956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.569969 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.648056 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 04:34:16.317357468 +0000 UTC Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.672646 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.672708 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.672728 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.672757 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.672778 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.776649 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.776713 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.776732 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.776764 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.776786 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.879902 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.879961 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.879984 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.880017 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.880038 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.982901 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.982959 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.982972 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.982992 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:22 crc kubenswrapper[5032]: I0122 16:06:22.983003 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:22Z","lastTransitionTime":"2026-01-22T16:06:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.085835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.085921 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.085934 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.085956 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.085971 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.188493 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.188543 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.188554 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.188576 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.188587 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.291437 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.291476 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.291487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.291507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.291520 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.395751 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.395807 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.395818 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.395838 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.395850 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.499299 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.499365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.499384 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.499411 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.499430 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.602088 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.602133 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.602142 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.602157 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.602170 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.649147 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 00:10:55.772176096 +0000 UTC Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.705402 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.705461 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.705477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.705501 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.705521 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.808289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.808369 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.808390 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.808418 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.808436 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.912013 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.912069 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.912090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.912116 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:23 crc kubenswrapper[5032]: I0122 16:06:23.912133 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:23Z","lastTransitionTime":"2026-01-22T16:06:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.017103 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.017215 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.017241 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.017267 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.017285 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.120426 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.120499 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.120517 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.120545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.120563 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.224295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.224342 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.224365 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.224387 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.224404 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.328295 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.328363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.328379 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.328407 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.328424 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.430386 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.430465 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.430482 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.430500 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.430510 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.454047 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.454095 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.454051 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:24 crc kubenswrapper[5032]: E0122 16:06:24.454231 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.454301 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:24 crc kubenswrapper[5032]: E0122 16:06:24.454527 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:24 crc kubenswrapper[5032]: E0122 16:06:24.454585 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:24 crc kubenswrapper[5032]: E0122 16:06:24.454676 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.538761 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.538817 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.538833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.538881 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.538903 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.642154 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.642211 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.642224 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.642248 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.642263 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.649324 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 12:39:29.884716966 +0000 UTC Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.745655 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.745710 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.745721 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.745742 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.745756 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.847954 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.848004 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.848014 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.848035 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.848077 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.950271 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.950334 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.950353 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.950378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:24 crc kubenswrapper[5032]: I0122 16:06:24.950398 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:24Z","lastTransitionTime":"2026-01-22T16:06:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.053354 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.053441 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.053461 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.053492 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.053511 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.156314 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.156363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.156378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.156397 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.156408 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.259303 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.259350 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.259363 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.259378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.259389 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.363415 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.363458 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.363466 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.363487 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.363497 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.467002 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.467049 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.467063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.467084 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.467094 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.570168 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.570216 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.570225 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.570247 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.570265 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.650282 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 15:37:27.346662922 +0000 UTC Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.674003 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.674044 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.674055 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.674075 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.674086 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.777917 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.777991 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.778013 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.778045 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.778063 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.880880 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.880924 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.880933 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.880965 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.880977 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.983579 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.983658 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.983677 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.983701 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:25 crc kubenswrapper[5032]: I0122 16:06:25.983719 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:25Z","lastTransitionTime":"2026-01-22T16:06:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.086083 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.086149 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.086169 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.086198 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.086217 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.189064 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.189123 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.189143 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.189178 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.189203 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.292471 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.292517 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.292529 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.292551 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.292564 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.395206 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.395391 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.395410 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.395438 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.395456 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.454050 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.454283 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.454369 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:26 crc kubenswrapper[5032]: E0122 16:06:26.454468 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.454512 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:26 crc kubenswrapper[5032]: E0122 16:06:26.454705 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:26 crc kubenswrapper[5032]: E0122 16:06:26.454995 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:26 crc kubenswrapper[5032]: E0122 16:06:26.455275 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.476464 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.498967 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.499045 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.499059 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.499106 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.499124 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.499024 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1410b11-001c-4b31-ab18-02e37bc11454\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a85c35f2193efeec643db47d91413639101b9dcb05dc6e2888b26d1f402ae6b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1170c63f9be0b3dad103dc1d3400c2e57a80229f2f48c15bb60b703eb2a0dd41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dcee4f81d98b29d465b7c855a9be2040b6c2f4288684d721dcf44c90aebaa67\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de95116c8e3968e5dcfe7498472eefff7f7215d0f8302efe5ba1f87364c8b1a9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc24198a739498ca46fbe85a278ccf9ef842c31e981c0fca9464ca9e5bb1ff6f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d86f73e584cc8d85ef2a6bfa942c1f632875b978d415466e9286d8215322727c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1ac566a01259f10f538df1ee7009ae9b922d8a0fb08f9a51ff83d11e4b06428\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22dpc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5gn8h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.518889 5032 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-95g6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsj2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-95g6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.535100 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"378ad6bf-0f91-402f-a16b-76968884acea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"le observer\\\\nW0122 16:05:16.397244 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 16:05:16.397441 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 16:05:16.398192 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1944306817/tls.crt::/tmp/serving-cert-1944306817/tls.key\\\\\\\"\\\\nI0122 16:05:16.571233 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 16:05:16.578146 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 16:05:16.578201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 16:05:16.578250 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 16:05:16.578314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 16:05:16.597882 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 16:05:16.597909 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597915 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 16:05:16.597920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 16:05:16.597924 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 16:05:16.597927 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 16:05:16.597930 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 16:05:16.598179 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 16:05:16.601288 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.550673 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rbbd6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fa8611e-686b-432f-8871-e6b5645e3108\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a717d5fee2ef2e178b119efd9094c6a6eb3da4bf8704b3c86455feaa447e6e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pkf6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rbbd6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.563589 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zfz9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d111dfc5-e30e-477d-81c1-afcc5bd064ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:06:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:03Z\\\",\\\"message\\\":\\\"2026-01-22T16:05:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92\\\\n2026-01-22T16:05:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a6da0c10-7ef9-44fe-bde0-67f81bbc4f92 to /host/opt/cni/bin/\\\\n2026-01-22T16:05:17Z [verbose] multus-daemon started\\\\n2026-01-22T16:05:17Z [verbose] Readiness Indicator file check\\\\n2026-01-22T16:06:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:06:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5cr55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zfz9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.584173 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56903a8f-394c-4fbf-90fc-8c058c31c8d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://721ba1b7e5159e59eb97175415752855faf22e6d4abb7f301bfe2f19f6e548f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3afdb99b0446cedef048ffe3eaae67ee66f415a54cb4c7963942c012f703f66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fgpgq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:29Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2rfvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 
16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601306 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601386 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601413 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601456 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601476 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.601820 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b45d929-f8bc-4b6c-8710-bb113354a059\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:04:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c179a364aaefe1cd1825a612673597f0ead6476e3b3438682f18a87bc53c4298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c794672a007a6fc2008571b833f3eea416949ae76e5a1114d0e8e356660fe647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09433df7acc4baad26951f3e04981297d6ec2a5e3fad75202b0ab1a85d569d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:04:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9da3c52f68a6bfab4a1ebd73ae87d1c99387b153164fb7a3551ff073d7dc41b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:04:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:04:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:04:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.622146 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f4f794f18cf332d397609c59f5978285f6f41b2d277c9a3a8cd9571c7375a24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.638497 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.651157 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 02:25:41.823293081 +0000 UTC Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.654137 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.693669 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323
df0ec64ce12fa1e4aadd0da2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://175b7fd0b0493c3d96b3634a04e4b4bb74e60d144c0b707cf82faa50d609b7ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:05:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 16:05:45.538423 6732 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538618 6732 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI0122 16:05:45.538762 6732 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 16:05:45.538786 6732 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 16:05:45.538800 6732 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0122 16:05:45.538814 6732 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0122 16:05:45.538832 6732 factory.go:656] Stopping watch factory\\\\nI0122 16:05:45.538850 6732 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0122 16:05:45.538876 6732 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0122 16:05:45.538766 6732 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 16:05:45.538844 6732 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 16:05:45.538938 6732 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T16:06:15Z\\\",\\\"message\\\":\\\", Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0122 16:06:15.613531 7117 ovnkube.go:599] Stopped ovnkube\\\\nI0122 16:06:15.613643 7117 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0122 16:06:15.613693 7117 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication/oauth-openshift]} name:Service_openshift-authentication/oauth-openshift_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.222:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c0c2f725-e461-454e-a88c-c8350d62e1ef}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0122 16:06:15.613753 7117 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add 
event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc anno\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T16:06:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-psnqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5q2w8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.706753 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.706803 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.706816 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.706833 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.706844 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.710590 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-th95w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a49ee60-0dea-4adf-bcf7-d5717c64218e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1c319b39075523b0c6c9386d1b4092d8eb61aec9e5049193a8b31fc603bb8c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rccfg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:18Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-th95w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.723684 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.735843 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.747626 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fee2227c-a587-4ec3-909d-bd355aa116d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10c5f789625fec8117fa8b815b62397a0efe4fb8a968b00a543262c4f8ae6923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bthxt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T16:05:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6qsfs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:26Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.810849 5032 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.810904 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.810919 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.810936 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.810949 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.913809 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.913909 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.913927 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.913974 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:26 crc kubenswrapper[5032]: I0122 16:06:26.913992 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:26Z","lastTransitionTime":"2026-01-22T16:06:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.016952 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.017047 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.017072 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.017112 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.017140 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.119428 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.119486 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.119500 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.119523 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.119539 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.223253 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.223309 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.223321 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.223341 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.223354 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.326980 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.327095 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.327120 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.327154 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.327177 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.430535 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.430606 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.430618 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.430640 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.430654 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.456127 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:06:27 crc kubenswrapper[5032]: E0122 16:06:27.456538 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.480021 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b7a19dd2c8a3d9dde2c88bef925009c770cea0d8363f260b8055321461444fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.500917 5032 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T16:05:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6267d10b7b408fb7571def980b05ae62b53c538f7f7a8d4316f8c72994cd4719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae4c67acdd1c6388f5969b96dfd45da87d2bc2a8a24b9492d3896007f4c91b3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T16:05:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T16:06:27Z is after 2025-08-24T17:21:41Z" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.534247 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podStartSLOduration=71.534212518 podStartE2EDuration="1m11.534212518s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 
16:06:27.532149108 +0000 UTC m=+91.344318149" watchObservedRunningTime="2026-01-22 16:06:27.534212518 +0000 UTC m=+91.346381589" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.536964 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.537025 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.537042 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.537072 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.537090 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.580814 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=71.580781724 podStartE2EDuration="1m11.580781724s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.561614197 +0000 UTC m=+91.373783248" watchObservedRunningTime="2026-01-22 16:06:27.580781724 +0000 UTC m=+91.392950775" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.606338 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-5gn8h" podStartSLOduration=71.606308997 podStartE2EDuration="1m11.606308997s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.60600292 +0000 UTC m=+91.418171981" watchObservedRunningTime="2026-01-22 16:06:27.606308997 +0000 UTC m=+91.418478028" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.640471 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.640540 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.640555 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.640581 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.640597 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.646161 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=39.646134829 podStartE2EDuration="39.646134829s" podCreationTimestamp="2026-01-22 16:05:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.646015956 +0000 UTC m=+91.458185007" watchObservedRunningTime="2026-01-22 16:06:27.646134829 +0000 UTC m=+91.458303860" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.651844 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 22:06:22.11175927 +0000 UTC Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.667786 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-rbbd6" podStartSLOduration=72.667754377 podStartE2EDuration="1m12.667754377s" podCreationTimestamp="2026-01-22 16:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.667008318 +0000 UTC m=+91.479177369" watchObservedRunningTime="2026-01-22 16:06:27.667754377 +0000 UTC m=+91.479923408" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.684067 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-zfz9c" podStartSLOduration=71.684038914 podStartE2EDuration="1m11.684038914s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.683000249 +0000 UTC m=+91.495169310" watchObservedRunningTime="2026-01-22 16:06:27.684038914 +0000 UTC m=+91.496207985" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.701618 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2rfvn" podStartSLOduration=70.701569662 podStartE2EDuration="1m10.701569662s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.700065035 +0000 UTC m=+91.512234096" watchObservedRunningTime="2026-01-22 16:06:27.701569662 +0000 UTC m=+91.513738703" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.742995 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.743046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.743060 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.743082 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.743095 5032 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.845811 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.845913 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.845929 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.845954 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.845967 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.949557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.949614 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.949627 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.949650 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:27 crc kubenswrapper[5032]: I0122 16:06:27.949663 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:27Z","lastTransitionTime":"2026-01-22T16:06:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.053378 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.053451 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.053470 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.053502 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.053525 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.157795 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.157840 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.157851 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.157888 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.157904 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.262172 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.262259 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.262276 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.262307 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.262326 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.365159 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.365243 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.365267 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.365301 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.365329 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.454830 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.454912 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.455002 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.455257 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:28 crc kubenswrapper[5032]: E0122 16:06:28.455469 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:28 crc kubenswrapper[5032]: E0122 16:06:28.455558 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:28 crc kubenswrapper[5032]: E0122 16:06:28.455719 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:28 crc kubenswrapper[5032]: E0122 16:06:28.455826 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.468462 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.468532 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.468550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.468578 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.468598 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.483185 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-th95w" podStartSLOduration=72.483154622 podStartE2EDuration="1m12.483154622s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:27.798501927 +0000 UTC m=+91.610670958" watchObservedRunningTime="2026-01-22 16:06:28.483154622 +0000 UTC m=+92.295323673" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.483834 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.571477 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.571550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.571572 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.571601 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.571623 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.652626 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 17:18:58.55104197 +0000 UTC Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.674974 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.675084 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.675105 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.675135 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.675155 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.778101 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.778159 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.778171 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.778191 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.778204 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.882434 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.882484 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.882496 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.882517 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.882534 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.985507 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.985546 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.985557 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.985571 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:28 crc kubenswrapper[5032]: I0122 16:06:28.985581 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:28Z","lastTransitionTime":"2026-01-22T16:06:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.089006 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.089266 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.089289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.089320 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.089348 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.192993 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.193046 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.193063 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.193090 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.193109 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.296552 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.296632 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.296649 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.296673 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.296692 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.401229 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.401289 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.401313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.401345 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.401401 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.504377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.504545 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.504576 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.504689 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.504793 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.608689 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.608759 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.608778 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.608808 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.608827 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.656614 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 01:51:41.754160766 +0000 UTC Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.711979 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.712029 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.712041 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.712061 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.712073 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.814893 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.814970 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.814994 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.815022 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.815041 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.918584 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.918662 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.918683 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.918710 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:29 crc kubenswrapper[5032]: I0122 16:06:29.918732 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:29Z","lastTransitionTime":"2026-01-22T16:06:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.022794 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.022900 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.022928 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.022959 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.023013 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.126419 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.126497 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.126516 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.126550 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.126576 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.230204 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.230310 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.230336 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.230371 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.230390 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.333168 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.333222 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.333234 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.333255 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.333275 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.436219 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.436300 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.436313 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.436338 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.436354 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.453714 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.453717 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:30 crc kubenswrapper[5032]: E0122 16:06:30.453887 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.454033 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.454038 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:30 crc kubenswrapper[5032]: E0122 16:06:30.454345 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:30 crc kubenswrapper[5032]: E0122 16:06:30.454522 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:30 crc kubenswrapper[5032]: E0122 16:06:30.454620 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.540304 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.540370 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.540389 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.540419 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.540440 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.643202 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.643260 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.643276 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.643301 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.643318 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.657773 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 08:32:05.924551976 +0000 UTC Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.746663 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.746726 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.746747 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.746774 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.746796 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.849603 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.849674 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.849777 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.849819 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.849845 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.953267 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.953346 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.953360 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.953377 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:30 crc kubenswrapper[5032]: I0122 16:06:30.953390 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:30Z","lastTransitionTime":"2026-01-22T16:06:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.056664 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.056728 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.056744 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.056768 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.056782 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.160821 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.160948 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.160976 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.161010 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.161033 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.264370 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.264435 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.264455 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.264480 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.264495 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.367163 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.367228 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.367243 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.367269 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.367285 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.470896 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.470932 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.470943 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.470964 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.470974 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.474565 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.573681 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.573770 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.573795 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.573821 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.573838 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.596774 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.596835 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.596850 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.596899 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.596920 5032 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T16:06:31Z","lastTransitionTime":"2026-01-22T16:06:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.646833 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf"] Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.647362 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.651163 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.651560 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.651932 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.652005 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.658399 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 15:19:33.861574941 +0000 UTC Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.658445 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.669831 5032 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.682648 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=3.682623437 podStartE2EDuration="3.682623437s" podCreationTimestamp="2026-01-22 16:06:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:31.664978657 +0000 UTC m=+95.477147758" watchObservedRunningTime="2026-01-22 16:06:31.682623437 +0000 UTC m=+95.494792478" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.749854 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=0.749827377 podStartE2EDuration="749.827377ms" podCreationTimestamp="2026-01-22 16:06:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:31.749415137 +0000 UTC m=+95.561584198" watchObservedRunningTime="2026-01-22 16:06:31.749827377 +0000 UTC m=+95.561996428" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.761616 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c434e00-6c08-499c-8ef2-4ac677c1b228-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.761693 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: 
I0122 16:06:31.761801 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.761842 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c434e00-6c08-499c-8ef2-4ac677c1b228-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.761872 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7c434e00-6c08-499c-8ef2-4ac677c1b228-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862765 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c434e00-6c08-499c-8ef2-4ac677c1b228-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862827 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862884 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862913 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c434e00-6c08-499c-8ef2-4ac677c1b228-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862928 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.862936 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7c434e00-6c08-499c-8ef2-4ac677c1b228-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.863055 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7c434e00-6c08-499c-8ef2-4ac677c1b228-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.864350 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7c434e00-6c08-499c-8ef2-4ac677c1b228-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.874308 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c434e00-6c08-499c-8ef2-4ac677c1b228-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.880377 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c434e00-6c08-499c-8ef2-4ac677c1b228-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7mmf\" (UID: \"7c434e00-6c08-499c-8ef2-4ac677c1b228\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:31 crc kubenswrapper[5032]: I0122 16:06:31.973995 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" Jan 22 16:06:32 crc kubenswrapper[5032]: W0122 16:06:32.009991 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c434e00_6c08_499c_8ef2_4ac677c1b228.slice/crio-ef2e4c71759c2d4729423dbf238ba8177ed3c164f2f4eb644c8d2d59f32f5832 WatchSource:0}: Error finding container ef2e4c71759c2d4729423dbf238ba8177ed3c164f2f4eb644c8d2d59f32f5832: Status 404 returned error can't find the container with id ef2e4c71759c2d4729423dbf238ba8177ed3c164f2f4eb644c8d2d59f32f5832 Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.212824 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" event={"ID":"7c434e00-6c08-499c-8ef2-4ac677c1b228","Type":"ContainerStarted","Data":"2d6f6c0108bca18963c8988e98b2abb63963b3f615194c41a230708723093cca"} Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.212901 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" event={"ID":"7c434e00-6c08-499c-8ef2-4ac677c1b228","Type":"ContainerStarted","Data":"ef2e4c71759c2d4729423dbf238ba8177ed3c164f2f4eb644c8d2d59f32f5832"} Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.239960 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7mmf" podStartSLOduration=76.239921475 podStartE2EDuration="1m16.239921475s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:32.238653744 +0000 UTC m=+96.050822845" watchObservedRunningTime="2026-01-22 16:06:32.239921475 +0000 UTC m=+96.052090566" Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.453673 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.453925 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:32 crc kubenswrapper[5032]: E0122 16:06:32.453947 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.453965 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:32 crc kubenswrapper[5032]: I0122 16:06:32.454045 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:32 crc kubenswrapper[5032]: E0122 16:06:32.454162 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:32 crc kubenswrapper[5032]: E0122 16:06:32.454403 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:32 crc kubenswrapper[5032]: E0122 16:06:32.454565 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:34 crc kubenswrapper[5032]: I0122 16:06:34.454625 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:34 crc kubenswrapper[5032]: I0122 16:06:34.454661 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.454885 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:34 crc kubenswrapper[5032]: I0122 16:06:34.455230 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.455409 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.455666 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:34 crc kubenswrapper[5032]: I0122 16:06:34.455940 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.456045 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:34 crc kubenswrapper[5032]: I0122 16:06:34.798554 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.798801 5032 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:06:34 crc kubenswrapper[5032]: E0122 16:06:34.799011 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs podName:8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2 nodeName:}" failed. No retries permitted until 2026-01-22 16:07:38.798957304 +0000 UTC m=+162.611126365 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs") pod "network-metrics-daemon-95g6b" (UID: "8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 16:06:36 crc kubenswrapper[5032]: I0122 16:06:36.453705 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:36 crc kubenswrapper[5032]: I0122 16:06:36.453775 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:36 crc kubenswrapper[5032]: E0122 16:06:36.454957 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:36 crc kubenswrapper[5032]: I0122 16:06:36.453992 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:36 crc kubenswrapper[5032]: I0122 16:06:36.453823 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:36 crc kubenswrapper[5032]: E0122 16:06:36.455407 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:36 crc kubenswrapper[5032]: E0122 16:06:36.455450 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:36 crc kubenswrapper[5032]: E0122 16:06:36.455591 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:38 crc kubenswrapper[5032]: I0122 16:06:38.454233 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:38 crc kubenswrapper[5032]: I0122 16:06:38.454241 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:38 crc kubenswrapper[5032]: I0122 16:06:38.454331 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:38 crc kubenswrapper[5032]: I0122 16:06:38.454409 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:38 crc kubenswrapper[5032]: E0122 16:06:38.454467 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:38 crc kubenswrapper[5032]: E0122 16:06:38.454583 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:38 crc kubenswrapper[5032]: E0122 16:06:38.454672 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:38 crc kubenswrapper[5032]: E0122 16:06:38.454800 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:39 crc kubenswrapper[5032]: I0122 16:06:39.455661 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:06:39 crc kubenswrapper[5032]: E0122 16:06:39.456109 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:06:40 crc kubenswrapper[5032]: I0122 16:06:40.455930 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:40 crc kubenswrapper[5032]: I0122 16:06:40.455948 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:40 crc kubenswrapper[5032]: I0122 16:06:40.455981 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:40 crc kubenswrapper[5032]: I0122 16:06:40.456000 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:40 crc kubenswrapper[5032]: E0122 16:06:40.456493 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:40 crc kubenswrapper[5032]: E0122 16:06:40.456580 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:40 crc kubenswrapper[5032]: E0122 16:06:40.456687 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:40 crc kubenswrapper[5032]: E0122 16:06:40.456750 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:42 crc kubenswrapper[5032]: I0122 16:06:42.455065 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:42 crc kubenswrapper[5032]: I0122 16:06:42.455191 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:42 crc kubenswrapper[5032]: E0122 16:06:42.455364 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:42 crc kubenswrapper[5032]: I0122 16:06:42.455444 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:42 crc kubenswrapper[5032]: E0122 16:06:42.455648 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:42 crc kubenswrapper[5032]: I0122 16:06:42.456029 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:42 crc kubenswrapper[5032]: E0122 16:06:42.455844 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:42 crc kubenswrapper[5032]: E0122 16:06:42.456195 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:44 crc kubenswrapper[5032]: I0122 16:06:44.454237 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:44 crc kubenswrapper[5032]: I0122 16:06:44.454401 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:44 crc kubenswrapper[5032]: I0122 16:06:44.454436 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:44 crc kubenswrapper[5032]: E0122 16:06:44.454614 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:44 crc kubenswrapper[5032]: I0122 16:06:44.455076 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:44 crc kubenswrapper[5032]: E0122 16:06:44.455577 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:44 crc kubenswrapper[5032]: E0122 16:06:44.455938 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:44 crc kubenswrapper[5032]: E0122 16:06:44.456117 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:44 crc kubenswrapper[5032]: I0122 16:06:44.477661 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 22 16:06:46 crc kubenswrapper[5032]: I0122 16:06:46.453706 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:46 crc kubenswrapper[5032]: I0122 16:06:46.453706 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:46 crc kubenswrapper[5032]: I0122 16:06:46.453817 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:46 crc kubenswrapper[5032]: I0122 16:06:46.453849 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:46 crc kubenswrapper[5032]: E0122 16:06:46.458457 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:46 crc kubenswrapper[5032]: E0122 16:06:46.458726 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:46 crc kubenswrapper[5032]: E0122 16:06:46.458835 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:46 crc kubenswrapper[5032]: E0122 16:06:46.459168 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:46 crc kubenswrapper[5032]: I0122 16:06:46.508960 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.508929782 podStartE2EDuration="2.508929782s" podCreationTimestamp="2026-01-22 16:06:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:06:46.508575153 +0000 UTC m=+110.320744254" watchObservedRunningTime="2026-01-22 16:06:46.508929782 +0000 UTC m=+110.321098853" Jan 22 16:06:48 crc kubenswrapper[5032]: I0122 16:06:48.453842 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:48 crc kubenswrapper[5032]: I0122 16:06:48.453945 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:48 crc kubenswrapper[5032]: E0122 16:06:48.454533 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:48 crc kubenswrapper[5032]: I0122 16:06:48.454095 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:48 crc kubenswrapper[5032]: I0122 16:06:48.454035 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:48 crc kubenswrapper[5032]: E0122 16:06:48.454660 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:48 crc kubenswrapper[5032]: E0122 16:06:48.454814 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:48 crc kubenswrapper[5032]: E0122 16:06:48.454922 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.282234 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/1.log" Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.283085 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/0.log" Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.283141 5032 generic.go:334] "Generic (PLEG): container finished" podID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" containerID="38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827" exitCode=1 Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.283208 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerDied","Data":"38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827"} Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.283316 5032 scope.go:117] "RemoveContainer" containerID="c548e17c3b6ab7532d0ea8e2f600b05ed5a726ada605b1165dbc023dcc18f136" Jan 22 16:06:49 crc kubenswrapper[5032]: I0122 16:06:49.284421 5032 scope.go:117] "RemoveContainer" containerID="38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827" Jan 22 16:06:49 crc kubenswrapper[5032]: E0122 16:06:49.285760 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-zfz9c_openshift-multus(d111dfc5-e30e-477d-81c1-afcc5bd064ad)\"" pod="openshift-multus/multus-zfz9c" podUID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" Jan 22 16:06:50 crc kubenswrapper[5032]: I0122 16:06:50.296172 
5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/1.log" Jan 22 16:06:50 crc kubenswrapper[5032]: I0122 16:06:50.454666 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:50 crc kubenswrapper[5032]: I0122 16:06:50.454806 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:50 crc kubenswrapper[5032]: E0122 16:06:50.454900 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:50 crc kubenswrapper[5032]: I0122 16:06:50.454938 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:50 crc kubenswrapper[5032]: I0122 16:06:50.454964 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:50 crc kubenswrapper[5032]: E0122 16:06:50.455706 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:50 crc kubenswrapper[5032]: E0122 16:06:50.455811 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:50 crc kubenswrapper[5032]: E0122 16:06:50.455891 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:52 crc kubenswrapper[5032]: I0122 16:06:52.454197 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:52 crc kubenswrapper[5032]: I0122 16:06:52.454404 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:52 crc kubenswrapper[5032]: E0122 16:06:52.454667 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:52 crc kubenswrapper[5032]: I0122 16:06:52.455075 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:52 crc kubenswrapper[5032]: I0122 16:06:52.455138 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:52 crc kubenswrapper[5032]: E0122 16:06:52.455230 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:52 crc kubenswrapper[5032]: E0122 16:06:52.455456 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:52 crc kubenswrapper[5032]: E0122 16:06:52.455667 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:54 crc kubenswrapper[5032]: I0122 16:06:54.454244 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:54 crc kubenswrapper[5032]: I0122 16:06:54.454341 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:54 crc kubenswrapper[5032]: I0122 16:06:54.454394 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:54 crc kubenswrapper[5032]: E0122 16:06:54.454490 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:54 crc kubenswrapper[5032]: I0122 16:06:54.454529 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:54 crc kubenswrapper[5032]: E0122 16:06:54.454720 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:54 crc kubenswrapper[5032]: E0122 16:06:54.455485 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:54 crc kubenswrapper[5032]: E0122 16:06:54.455647 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:54 crc kubenswrapper[5032]: I0122 16:06:54.455977 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:06:54 crc kubenswrapper[5032]: E0122 16:06:54.456237 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5q2w8_openshift-ovn-kubernetes(ba09835f-0f75-4266-9ae2-39dc3cb0fd4c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" Jan 22 16:06:56 crc kubenswrapper[5032]: I0122 16:06:56.454342 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:56 crc kubenswrapper[5032]: I0122 16:06:56.454420 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:56 crc kubenswrapper[5032]: I0122 16:06:56.454366 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:56 crc kubenswrapper[5032]: I0122 16:06:56.454467 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.461631 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.461932 5032 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.462664 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.463060 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.463334 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:06:56 crc kubenswrapper[5032]: E0122 16:06:56.625215 5032 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 16:06:58 crc kubenswrapper[5032]: I0122 16:06:58.454018 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:06:58 crc kubenswrapper[5032]: I0122 16:06:58.454105 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:06:58 crc kubenswrapper[5032]: I0122 16:06:58.454180 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:06:58 crc kubenswrapper[5032]: E0122 16:06:58.454277 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:06:58 crc kubenswrapper[5032]: I0122 16:06:58.454310 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:06:58 crc kubenswrapper[5032]: E0122 16:06:58.454487 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:06:58 crc kubenswrapper[5032]: E0122 16:06:58.454674 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:06:58 crc kubenswrapper[5032]: E0122 16:06:58.454836 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:00 crc kubenswrapper[5032]: I0122 16:07:00.454528 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:00 crc kubenswrapper[5032]: E0122 16:07:00.454688 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:00 crc kubenswrapper[5032]: I0122 16:07:00.455844 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:00 crc kubenswrapper[5032]: E0122 16:07:00.455936 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:00 crc kubenswrapper[5032]: I0122 16:07:00.456120 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:00 crc kubenswrapper[5032]: E0122 16:07:00.456933 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:00 crc kubenswrapper[5032]: I0122 16:07:00.462093 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:00 crc kubenswrapper[5032]: E0122 16:07:00.462578 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:01 crc kubenswrapper[5032]: E0122 16:07:01.626625 5032 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 16:07:02 crc kubenswrapper[5032]: I0122 16:07:02.453854 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:02 crc kubenswrapper[5032]: I0122 16:07:02.454070 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:02 crc kubenswrapper[5032]: I0122 16:07:02.454307 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:02 crc kubenswrapper[5032]: E0122 16:07:02.454285 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:02 crc kubenswrapper[5032]: E0122 16:07:02.454473 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:02 crc kubenswrapper[5032]: E0122 16:07:02.454610 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:02 crc kubenswrapper[5032]: I0122 16:07:02.454831 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:02 crc kubenswrapper[5032]: E0122 16:07:02.455065 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:03 crc kubenswrapper[5032]: I0122 16:07:03.455370 5032 scope.go:117] "RemoveContainer" containerID="38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827" Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.358806 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/1.log" Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.359801 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerStarted","Data":"b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86"} Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.454672 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.454678 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.454731 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:04 crc kubenswrapper[5032]: I0122 16:07:04.454893 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:04 crc kubenswrapper[5032]: E0122 16:07:04.456130 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:04 crc kubenswrapper[5032]: E0122 16:07:04.456227 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:04 crc kubenswrapper[5032]: E0122 16:07:04.456271 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:04 crc kubenswrapper[5032]: E0122 16:07:04.456342 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:06 crc kubenswrapper[5032]: I0122 16:07:06.453988 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:06 crc kubenswrapper[5032]: I0122 16:07:06.454038 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:06 crc kubenswrapper[5032]: I0122 16:07:06.454146 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:06 crc kubenswrapper[5032]: E0122 16:07:06.456091 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:06 crc kubenswrapper[5032]: I0122 16:07:06.456162 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:06 crc kubenswrapper[5032]: E0122 16:07:06.456345 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:06 crc kubenswrapper[5032]: E0122 16:07:06.456435 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:06 crc kubenswrapper[5032]: E0122 16:07:06.456535 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:06 crc kubenswrapper[5032]: E0122 16:07:06.627523 5032 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 16:07:07 crc kubenswrapper[5032]: I0122 16:07:07.454773 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.378544 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/3.log" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.382709 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerStarted","Data":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.383126 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.403464 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-95g6b"] Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.404200 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:08 crc kubenswrapper[5032]: E0122 16:07:08.404368 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.453953 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.454032 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:08 crc kubenswrapper[5032]: I0122 16:07:08.453973 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:08 crc kubenswrapper[5032]: E0122 16:07:08.454138 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:08 crc kubenswrapper[5032]: E0122 16:07:08.454333 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:08 crc kubenswrapper[5032]: E0122 16:07:08.454500 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:10 crc kubenswrapper[5032]: I0122 16:07:10.454380 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:10 crc kubenswrapper[5032]: I0122 16:07:10.454387 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:10 crc kubenswrapper[5032]: E0122 16:07:10.455441 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 16:07:10 crc kubenswrapper[5032]: I0122 16:07:10.454550 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:10 crc kubenswrapper[5032]: E0122 16:07:10.455236 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 16:07:10 crc kubenswrapper[5032]: I0122 16:07:10.454430 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:10 crc kubenswrapper[5032]: E0122 16:07:10.455798 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-95g6b" podUID="8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2" Jan 22 16:07:10 crc kubenswrapper[5032]: E0122 16:07:10.455571 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.286539 5032 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.354297 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podStartSLOduration=116.354265518 podStartE2EDuration="1m56.354265518s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:08.427955639 +0000 UTC m=+132.240124730" watchObservedRunningTime="2026-01-22 16:07:12.354265518 +0000 UTC m=+136.166434599" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.356221 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mwxjh"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.357185 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.358642 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.359498 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.361384 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-hppgr"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.362251 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.363260 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.363991 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.367994 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.369130 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.377565 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.378260 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qhrqx"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.378746 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.379293 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.380771 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.381208 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.381341 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.398269 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.398537 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.398553 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v7spn"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.399330 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.399687 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.400043 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.400605 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.401082 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-thv97"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.400124 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403368 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403613 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403738 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403812 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403939 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.403998 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404041 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404127 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404173 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404196 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404269 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.404350 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.405277 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.405783 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.406300 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.410702 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.411463 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.411655 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.411807 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.411971 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.429559 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.429932 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.430216 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.430371 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.431169 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.431376 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.431401 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.432011 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.432268 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.432848 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.434995 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.435204 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.435336 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dqp7n"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.436664 5032 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"audit-1" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.436705 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.434428 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.455600 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.455646 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.455618 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.456222 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.456753 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.456842 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.456931 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.457074 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.458125 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.458315 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.458520 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.458923 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459059 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459301 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459373 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459395 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 22 16:07:12 
crc kubenswrapper[5032]: I0122 16:07:12.459462 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459547 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459427 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459629 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.459713 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460058 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-client\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460118 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhhvf\" (UniqueName: \"kubernetes.io/projected/6b35b27c-b438-4743-b8e3-ff4e510c497d-kube-api-access-jhhvf\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461717 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-trusted-ca\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461759 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461782 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461804 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h978z\" (UniqueName: \"kubernetes.io/projected/5de5146a-a4b0-4f38-963c-1d9c101ec864-kube-api-access-h978z\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc 
kubenswrapper[5032]: I0122 16:07:12.461827 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d624b9-2fed-4924-90f5-9918050b8074-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460681 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460448 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460542 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.460945 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461015 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461150 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461196 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461231 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461270 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461352 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461403 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461726 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461804 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.461854 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5de5146a-a4b0-4f38-963c-1d9c101ec864-serving-cert\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462448 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462492 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rghlf\" (UniqueName: \"kubernetes.io/projected/8b534bfa-1e36-4825-bbdb-aec579b973f8-kube-api-access-rghlf\") pod \"downloads-7954f5f757-hppgr\" (UID: \"8b534bfa-1e36-4825-bbdb-aec579b973f8\") " pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462514 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-audit-policies\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462534 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462538 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462560 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-encryption-config\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462585 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-config\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462609 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c22e380-d63b-4d02-a3a5-2e241c71fc16-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462631 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d624b9-2fed-4924-90f5-9918050b8074-config\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 
16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462672 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbq7b\" (UniqueName: \"kubernetes.io/projected/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-kube-api-access-xbq7b\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462695 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-serving-cert\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462718 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-available-featuregates\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462740 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462763 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-serving-cert\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462797 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462805 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpfmm\" (UniqueName: \"kubernetes.io/projected/3c22e380-d63b-4d02-a3a5-2e241c71fc16-kube-api-access-hpfmm\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462838 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-config\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.462961 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463006 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463424 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b35b27c-b438-4743-b8e3-ff4e510c497d-serving-cert\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463475 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/632f2319-d263-4e72-88ea-76f60ee7121e-audit-dir\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463499 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq9g9\" (UniqueName: \"kubernetes.io/projected/632f2319-d263-4e72-88ea-76f60ee7121e-kube-api-access-qq9g9\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463522 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463544 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ndm\" (UniqueName: \"kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463575 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-service-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463595 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c22e380-d63b-4d02-a3a5-2e241c71fc16-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463614 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7d624b9-2fed-4924-90f5-9918050b8074-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.463718 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.468498 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.469265 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.469308 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.478721 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.479841 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.481737 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.481950 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.482460 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.482548 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.482688 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.482829 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.482998 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.483261 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.483394 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.483547 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.483950 5032 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.484446 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.485002 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.485040 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.485134 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.485247 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488236 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488350 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488415 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488236 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488502 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.488540 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.490559 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.490693 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.490846 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.490988 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491102 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491224 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491318 5032 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491413 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491515 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491601 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491642 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491742 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.491615 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.492052 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-mrmfl"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.492798 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.494691 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.495285 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.497139 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-db6bl"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.497628 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.500161 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.511118 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.511949 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-627th"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.512662 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.512943 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.519445 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.520280 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.520973 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.521830 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.521912 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.524232 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.527296 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.527590 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.528239 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.528423 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-fdtwm"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.528830 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.529374 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.529791 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.531199 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-48r96"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.531575 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.531634 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.532347 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.532735 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.533428 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.534295 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.534458 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.534833 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.535383 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.535782 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.536311 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.536835 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.537318 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.538017 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.540063 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.541842 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-4tdkk"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.542236 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.542909 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fthxg"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.543445 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.543824 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mwxjh"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.544829 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.545759 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hppgr"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.546774 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.547987 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-mrmfl"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.548934 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.549964 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.551699 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v7spn"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.553315 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.555611 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.558696 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-db6bl"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.561730 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.562493 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qhrqx"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564147 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-image-import-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564191 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/632f2319-d263-4e72-88ea-76f60ee7121e-audit-dir\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564215 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq9g9\" (UniqueName: 
\"kubernetes.io/projected/632f2319-d263-4e72-88ea-76f60ee7121e-kube-api-access-qq9g9\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564233 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b35b27c-b438-4743-b8e3-ff4e510c497d-serving-cert\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564271 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66ndm\" (UniqueName: \"kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564295 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564312 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564330 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564346 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c22e380-d63b-4d02-a3a5-2e241c71fc16-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564364 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7d624b9-2fed-4924-90f5-9918050b8074-kube-api-access\") pod 
\"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564388 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-service-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564412 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564431 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-client\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564449 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhhvf\" (UniqueName: \"kubernetes.io/projected/6b35b27c-b438-4743-b8e3-ff4e510c497d-kube-api-access-jhhvf\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564469 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flvlh\" (UniqueName: \"kubernetes.io/projected/ceb9b499-7c79-4268-9e2c-6c65e8604713-kube-api-access-flvlh\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564487 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564502 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-trusted-ca\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564518 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564535 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d624b9-2fed-4924-90f5-9918050b8074-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564553 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h978z\" (UniqueName: \"kubernetes.io/projected/5de5146a-a4b0-4f38-963c-1d9c101ec864-kube-api-access-h978z\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564569 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564584 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szsns\" (UniqueName: \"kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564604 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5de5146a-a4b0-4f38-963c-1d9c101ec864-serving-cert\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564623 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mq2zv\" (UniqueName: \"kubernetes.io/projected/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-kube-api-access-mq2zv\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564642 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564658 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdkst\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564675 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-serving-cert\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564693 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rghlf\" (UniqueName: \"kubernetes.io/projected/8b534bfa-1e36-4825-bbdb-aec579b973f8-kube-api-access-rghlf\") pod \"downloads-7954f5f757-hppgr\" (UID: \"8b534bfa-1e36-4825-bbdb-aec579b973f8\") " pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564711 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-audit-policies\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564726 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564743 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564769 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-encryption-config\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564787 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-encryption-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564804 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ceb9b499-7c79-4268-9e2c-6c65e8604713-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564823 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564841 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-node-pullsecrets\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564871 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-client\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564889 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c22e380-d63b-4d02-a3a5-2e241c71fc16-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.564906 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d624b9-2fed-4924-90f5-9918050b8074-config\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565358 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-config\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565382 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbq7b\" (UniqueName: \"kubernetes.io/projected/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-kube-api-access-xbq7b\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565400 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565419 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565438 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-serving-cert\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565463 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-available-featuregates\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565482 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565503 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-serving-cert\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565522 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit-dir\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565542 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565561 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565578 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565611 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565629 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpfmm\" (UniqueName: \"kubernetes.io/projected/3c22e380-d63b-4d02-a3a5-2e241c71fc16-kube-api-access-hpfmm\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565655 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565681 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.565745 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-config\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.566437 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/632f2319-d263-4e72-88ea-76f60ee7121e-audit-dir\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.566570 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-config\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.566905 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.066892316 +0000 UTC m=+136.879061347 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.569079 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-available-featuregates\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.569437 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.569839 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.570419 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-service-ca-bundle\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.570620 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.572164 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-audit-policies\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.572233 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/632f2319-d263-4e72-88ea-76f60ee7121e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.572850 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c22e380-d63b-4d02-a3a5-2e241c71fc16-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.573261 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b35b27c-b438-4743-b8e3-ff4e510c497d-trusted-ca\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.573539 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d624b9-2fed-4924-90f5-9918050b8074-config\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.573843 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c22e380-d63b-4d02-a3a5-2e241c71fc16-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.573886 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-etcd-client\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.573952 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5de5146a-a4b0-4f38-963c-1d9c101ec864-config\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.575640 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.576298 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b35b27c-b438-4743-b8e3-ff4e510c497d-serving-cert\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.576874 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-encryption-config\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.577187 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5de5146a-a4b0-4f38-963c-1d9c101ec864-serving-cert\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.577967 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/632f2319-d263-4e72-88ea-76f60ee7121e-serving-cert\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.578245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d624b9-2fed-4924-90f5-9918050b8074-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.584692 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.589707 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.589760 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dqp7n"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.590425 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-dq8vl"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.600247 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.600397 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.601341 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.604443 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.615597 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.615652 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.618228 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config\") pod 
\"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.629317 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.629424 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-fdtwm"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.629487 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.629617 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-serving-cert\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.634099 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.635806 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-48r96"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.636917 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-627th"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.638172 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.639434 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.639452 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.640809 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-p6hqk"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.642034 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.642045 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xdxqs"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.643610 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.643614 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.644867 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.646272 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.647731 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.649394 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-thv97"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.650628 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-p6hqk"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.651886 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.653472 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fthxg"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.654946 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.656378 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xdxqs"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.657630 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.658828 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-vdmbf"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.659414 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.659486 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.660027 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-vdmbf"] Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.666700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.666851 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-22 16:07:13.166831245 +0000 UTC m=+136.979000276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.666918 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.666945 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.666978 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-image-import-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667005 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667020 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667039 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667077 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667122 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flvlh\" (UniqueName: 
\"kubernetes.io/projected/ceb9b499-7c79-4268-9e2c-6c65e8604713-kube-api-access-flvlh\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667149 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667167 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szsns\" (UniqueName: \"kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667187 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mq2zv\" (UniqueName: \"kubernetes.io/projected/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-kube-api-access-mq2zv\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667205 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdkst\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667220 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-serving-cert\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667250 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667278 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-encryption-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667296 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ceb9b499-7c79-4268-9e2c-6c65e8604713-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667313 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667332 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-node-pullsecrets\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667368 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-client\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667405 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667432 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667485 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit-dir\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667511 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667560 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667600 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.667624 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.668125 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-image-import-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.668329 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.668720 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.668785 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-audit-dir\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.669022 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-node-pullsecrets\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.669274 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.669726 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.169711355 +0000 UTC m=+136.981880386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.669800 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.670033 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.670217 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.670414 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.671083 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.671540 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.671611 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.671741 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.672073 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-serving-cert\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.673132 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-etcd-client\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.673497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ceb9b499-7c79-4268-9e2c-6c65e8604713-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.673775 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.674297 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.674659 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-encryption-config\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.680637 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.699114 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.719464 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.739974 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.760144 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.768297 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.768453 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.268412233 +0000 UTC m=+137.080581294 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.768792 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.769190 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.269181452 +0000 UTC m=+137.081350483 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.780519 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.799542 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.820218 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.839434 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.874616 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.875085 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.375024274 +0000 UTC m=+137.187193345 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.879340 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.881526 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.381491082 +0000 UTC m=+137.193660113 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.882330 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.891133 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.904540 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.918932 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.940035 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.958852 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.980566 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.981013 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.981418 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.481372669 +0000 UTC m=+137.293541740 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.981741 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:12 crc kubenswrapper[5032]: E0122 16:07:12.982626 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.482600819 +0000 UTC m=+137.294769890 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:12 crc kubenswrapper[5032]: I0122 16:07:12.999149 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.020222 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.049480 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.060370 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.080225 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.083192 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.083362 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.583332727 +0000 UTC m=+137.395501798 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.101091 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.120426 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.139988 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.160650 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.180024 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.185495 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.186030 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.686001642 +0000 UTC m=+137.498170703 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.200973 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.220374 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.241729 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.260362 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.280326 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.286357 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.286626 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.786589006 +0000 UTC m=+137.598758077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.287002 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.287469 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.787450187 +0000 UTC m=+137.599619258 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.299360 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.320286 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.340136 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.361515 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.380278 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.387563 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.388020 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.8879809 +0000 UTC m=+137.700149961 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.388321 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.389046 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.889001985 +0000 UTC m=+137.701171046 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.400962 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.419970 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.440079 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.460443 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.479941 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.490117 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.490318 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.990291087 +0000 UTC m=+137.802460158 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.490548 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.491412 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:13.991392384 +0000 UTC m=+137.803561445 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.500689 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.519370 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.538401 5032 request.go:700] Waited for 1.009283916s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/configmaps?fieldSelector=metadata.name%3Dsigning-cabundle&limit=500&resourceVersion=0 Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.540447 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.559843 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.579961 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.591624 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.591928 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.091843064 +0000 UTC m=+137.904012135 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.592124 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.592687 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.092668155 +0000 UTC m=+137.904837216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.601281 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.619798 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.639090 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.660404 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.680007 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.693755 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.693966 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.193930655 +0000 UTC m=+138.006099756 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.694193 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.694794 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.194770146 +0000 UTC m=+138.006939217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.699789 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.719968 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.740671 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.759851 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.780029 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.795122 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.795279 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.295249078 +0000 UTC m=+138.107418149 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.795487 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.796002 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.295986626 +0000 UTC m=+138.108155687 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.800749 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.820781 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.839935 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.860673 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.880052 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.896709 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.896935 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.396893658 +0000 UTC m=+138.209062739 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.897146 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.897718 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.397612965 +0000 UTC m=+138.209782036 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.899984 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.931620 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.939978 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.959433 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.980059 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.998501 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.998743 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.498714232 +0000 UTC m=+138.310883293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:13 crc kubenswrapper[5032]: I0122 16:07:13.998942 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:13 crc kubenswrapper[5032]: E0122 16:07:13.999438 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.499422329 +0000 UTC m=+138.311591390 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.000025 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.019888 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.050934 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.060726 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.081543 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.100331 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.100636 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.600581738 +0000 UTC m=+138.412750809 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.100790 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.101272 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.601253194 +0000 UTC m=+138.413422235 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.102750 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.120410 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.140111 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.161223 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.179892 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.200739 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.202610 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.202786 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-22 16:07:14.702767301 +0000 UTC m=+138.514936332 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.203188 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.203937 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.703902169 +0000 UTC m=+138.516071240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.220442 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.239203 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.261259 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.281064 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.300508 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.304818 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.305102 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.805062177 +0000 UTC m=+138.617231288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.305271 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.305757 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.805735173 +0000 UTC m=+138.617904234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.348541 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbq7b\" (UniqueName: \"kubernetes.io/projected/a6acdcf7-347a-42db-83a3-e5fc2bde8daa-kube-api-access-xbq7b\") pod \"openshift-config-operator-7777fb866f-thv97\" (UID: \"a6acdcf7-347a-42db-83a3-e5fc2bde8daa\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.356777 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.372405 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq9g9\" (UniqueName: \"kubernetes.io/projected/632f2319-d263-4e72-88ea-76f60ee7121e-kube-api-access-qq9g9\") pod \"apiserver-7bbb656c7d-wjs5l\" (UID: \"632f2319-d263-4e72-88ea-76f60ee7121e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.392154 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rghlf\" (UniqueName: \"kubernetes.io/projected/8b534bfa-1e36-4825-bbdb-aec579b973f8-kube-api-access-rghlf\") pod \"downloads-7954f5f757-hppgr\" (UID: \"8b534bfa-1e36-4825-bbdb-aec579b973f8\") " pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.406453 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.406800 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.906752158 +0000 UTC m=+138.718921229 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.407367 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.410990 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:14.910953691 +0000 UTC m=+138.723122752 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.413252 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpfmm\" (UniqueName: \"kubernetes.io/projected/3c22e380-d63b-4d02-a3a5-2e241c71fc16-kube-api-access-hpfmm\") pod \"openshift-controller-manager-operator-756b6f6bc6-nqd5d\" (UID: \"3c22e380-d63b-4d02-a3a5-2e241c71fc16\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.430610 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7d624b9-2fed-4924-90f5-9918050b8074-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-q7skp\" (UID: \"e7d624b9-2fed-4924-90f5-9918050b8074\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.440790 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ndm\" (UniqueName: \"kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm\") pod \"route-controller-manager-6576b87f9c-k9qb2\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.470847 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h978z\" (UniqueName: \"kubernetes.io/projected/5de5146a-a4b0-4f38-963c-1d9c101ec864-kube-api-access-h978z\") pod \"authentication-operator-69f744f599-mwxjh\" (UID: \"5de5146a-a4b0-4f38-963c-1d9c101ec864\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.482269 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhhvf\" (UniqueName: \"kubernetes.io/projected/6b35b27c-b438-4743-b8e3-ff4e510c497d-kube-api-access-jhhvf\") pod \"console-operator-58897d9998-qhrqx\" (UID: \"6b35b27c-b438-4743-b8e3-ff4e510c497d\") " pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.488670 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.501721 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.502167 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.509656 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.509687 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.009660379 +0000 UTC m=+138.821829420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.510625 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.511175 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.011152515 +0000 UTC m=+138.823321586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.548134 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.548747 5032 request.go:700] Waited for 1.94800069s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.550264 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.550743 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.565011 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.584981 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.589063 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.599793 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.612146 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.612483 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.112466248 +0000 UTC m=+138.924635279 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.619937 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.627900 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.640638 5032 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.641332 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.660751 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.680492 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.701505 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.713091 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.713649 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.213630946 +0000 UTC m=+139.025799977 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.719608 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.740032 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.760215 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.802027 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.813733 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.814030 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-22 16:07:15.314015435 +0000 UTC m=+139.126184466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.816355 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mq2zv\" (UniqueName: \"kubernetes.io/projected/5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a-kube-api-access-mq2zv\") pod \"apiserver-76f77b778f-v7spn\" (UID: \"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a\") " pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.833370 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdkst\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.856764 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szsns\" (UniqueName: \"kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns\") pod \"controller-manager-879f6c89f-2hzc8\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.875101 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flvlh\" (UniqueName: \"kubernetes.io/projected/ceb9b499-7c79-4268-9e2c-6c65e8604713-kube-api-access-flvlh\") pod \"cluster-samples-operator-665b6dd947-g7h77\" (UID: \"ceb9b499-7c79-4268-9e2c-6c65e8604713\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.905916 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915049 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t6xb\" (UniqueName: \"kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915084 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-srv-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915105 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915122 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915137 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpf6h\" (UniqueName: \"kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915153 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mjs7\" (UniqueName: \"kubernetes.io/projected/da458f3d-fa4b-40d1-b98b-903b4af624ce-kube-api-access-2mjs7\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915170 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11988f58-cabc-4d36-9b5c-242fe3b570d3-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915206 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915225 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915242 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tl4d\" (UniqueName: \"kubernetes.io/projected/f63082de-7673-4f58-8f7e-4142b6efc221-kube-api-access-2tl4d\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915257 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-metrics-tls\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915282 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/abd10370-83a9-4daf-88db-e8506a3d4a08-metrics-tls\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915307 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d796c730-c411-400b-91a1-00149a3b408b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915324 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915362 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr9mc\" (UniqueName: \"kubernetes.io/projected/22d00a1b-b830-4b42-bc79-536d6e1f92e8-kube-api-access-hr9mc\") pod \"migrator-59844c95c7-wmgm5\" (UID: \"22d00a1b-b830-4b42-bc79-536d6e1f92e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915394 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbtsp\" (UniqueName: \"kubernetes.io/projected/3d9c8bba-0473-49f1-885c-0ae256709252-kube-api-access-zbtsp\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915421 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-stats-auth\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915439 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/efbef706-f0b8-40d2-bbb1-517e5685dd30-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915456 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f63082de-7673-4f58-8f7e-4142b6efc221-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915473 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e7a95b4-7024-4049-b322-ccdd11835356-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915490 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq8g7\" (UniqueName: \"kubernetes.io/projected/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-kube-api-access-xq8g7\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915539 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915557 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915575 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915602 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k5j4\" (UniqueName: \"kubernetes.io/projected/7271da0d-ff36-40b3-acfd-1d782e98db0f-kube-api-access-7k5j4\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915635 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915660 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz2cr\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-kube-api-access-sz2cr\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915699 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rqpg\" (UniqueName: \"kubernetes.io/projected/0a89d202-f652-48bc-a28e-82c7790dfe94-kube-api-access-9rqpg\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915714 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915739 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-config\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915753 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915768 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915803 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915819 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915846 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-metrics-certs\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915887 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915905 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7271da0d-ff36-40b3-acfd-1d782e98db0f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915924 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915951 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efbef706-f0b8-40d2-bbb1-517e5685dd30-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915968 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.915985 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a95b4-7024-4049-b322-ccdd11835356-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916003 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916021 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/77603536-1886-445b-b0e5-e0985948c137-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916035 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd8tm\" (UniqueName: \"kubernetes.io/projected/0a8119c9-c2f9-46d4-9dfc-942803871800-kube-api-access-dd8tm\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916050 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3d9c8bba-0473-49f1-885c-0ae256709252-machine-approver-tls\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916067 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssmrb\" (UniqueName: \"kubernetes.io/projected/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-kube-api-access-ssmrb\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:14 crc 
kubenswrapper[5032]: I0122 16:07:14.916097 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvzhv\" (UniqueName: \"kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916112 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48cddb00-9308-423c-90df-9a0f52d6b6b3-serving-cert\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916127 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916141 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916157 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-webhook-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916177 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916193 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-srv-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916216 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gmrz\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-kube-api-access-8gmrz\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916233 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-proxy-tls\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916248 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916265 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916300 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-auth-proxy-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916314 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916328 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-images\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916341 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-apiservice-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916355 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11988f58-cabc-4d36-9b5c-242fe3b570d3-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916370 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ttc7\" (UniqueName: \"kubernetes.io/projected/48cddb00-9308-423c-90df-9a0f52d6b6b3-kube-api-access-4ttc7\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916395 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-serving-cert\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916411 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916434 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d796c730-c411-400b-91a1-00149a3b408b-config\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916455 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d796c730-c411-400b-91a1-00149a3b408b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916470 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-default-certificate\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916484 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916515 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5vrp\" (UniqueName: \"kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916529 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916545 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wp2s\" (UniqueName: \"kubernetes.io/projected/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-kube-api-access-4wp2s\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916561 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgq28\" (UniqueName: \"kubernetes.io/projected/f8efa593-e2f5-475f-b7ff-cb4f53731b37-kube-api-access-fgq28\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916575 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a8119c9-c2f9-46d4-9dfc-942803871800-tmpfs\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916589 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-key\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916604 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f63082de-7673-4f58-8f7e-4142b6efc221-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916620 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd10370-83a9-4daf-88db-e8506a3d4a08-trusted-ca\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916636 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lqs2\" (UniqueName: \"kubernetes.io/projected/77603536-1886-445b-b0e5-e0985948c137-kube-api-access-9lqs2\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: 
\"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916652 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffggr\" (UniqueName: \"kubernetes.io/projected/705472d6-e8cd-483c-b315-b3a3df1da2a3-kube-api-access-ffggr\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916684 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-cabundle\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916729 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48cddb00-9308-423c-90df-9a0f52d6b6b3-config\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916745 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916772 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/dfe97498-e7f6-4e19-ad51-65c34957b415-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916799 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a95b4-7024-4049-b322-ccdd11835356-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916821 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916838 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2g76\" (UniqueName: \"kubernetes.io/projected/dfe97498-e7f6-4e19-ad51-65c34957b415-kube-api-access-z2g76\") pod 
\"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916867 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f8efa593-e2f5-475f-b7ff-cb4f53731b37-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916883 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzgfj\" (UniqueName: \"kubernetes.io/projected/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-kube-api-access-mzgfj\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916899 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916917 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/77603536-1886-445b-b0e5-e0985948c137-proxy-tls\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916934 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-service-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916950 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/da458f3d-fa4b-40d1-b98b-903b4af624ce-service-ca-bundle\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.916965 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g82z\" (UniqueName: \"kubernetes.io/projected/aef5966c-f9f0-4f49-b829-3c9f48b8420c-kube-api-access-7g82z\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.917003 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-images\") pod 
\"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.917036 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-client\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.917053 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-config\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.917068 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.917084 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbl4l\" (UniqueName: \"kubernetes.io/projected/11988f58-cabc-4d36-9b5c-242fe3b570d3-kube-api-access-tbl4l\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:14 crc kubenswrapper[5032]: E0122 16:07:14.917589 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.417577112 +0000 UTC m=+139.229746143 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:14 crc kubenswrapper[5032]: I0122 16:07:14.934926 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028409 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.028620 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.528578291 +0000 UTC m=+139.340747352 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028720 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wp2s\" (UniqueName: \"kubernetes.io/projected/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-kube-api-access-4wp2s\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028767 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028838 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4e04e48-8670-4b7a-9c72-e89c34bc835f-cert\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028901 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a8119c9-c2f9-46d4-9dfc-942803871800-tmpfs\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028942 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jj8g\" (UniqueName: \"kubernetes.io/projected/e4e04e48-8670-4b7a-9c72-e89c34bc835f-kube-api-access-2jj8g\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.028969 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-82jxc\" (UniqueName: \"kubernetes.io/projected/6ee38646-5872-444d-ba8c-32b92ca36fff-kube-api-access-82jxc\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029005 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgq28\" (UniqueName: \"kubernetes.io/projected/f8efa593-e2f5-475f-b7ff-cb4f53731b37-kube-api-access-fgq28\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029065 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-key\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029095 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f63082de-7673-4f58-8f7e-4142b6efc221-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029130 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd10370-83a9-4daf-88db-e8506a3d4a08-trusted-ca\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029161 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lqs2\" (UniqueName: \"kubernetes.io/projected/77603536-1886-445b-b0e5-e0985948c137-kube-api-access-9lqs2\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029548 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffggr\" (UniqueName: \"kubernetes.io/projected/705472d6-e8cd-483c-b315-b3a3df1da2a3-kube-api-access-ffggr\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029634 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-cabundle\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029819 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48cddb00-9308-423c-90df-9a0f52d6b6b3-config\") pod \"service-ca-operator-777779d784-48r96\" (UID: 
\"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029906 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029946 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/dfe97498-e7f6-4e19-ad51-65c34957b415-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.029952 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a8119c9-c2f9-46d4-9dfc-942803871800-tmpfs\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030006 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g2lp\" (UniqueName: \"kubernetes.io/projected/5350f539-bc65-4421-b3c2-cf7dc0d52e98-kube-api-access-7g2lp\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030044 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a95b4-7024-4049-b322-ccdd11835356-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030109 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030144 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2g76\" (UniqueName: \"kubernetes.io/projected/dfe97498-e7f6-4e19-ad51-65c34957b415-kube-api-access-z2g76\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030178 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f8efa593-e2f5-475f-b7ff-cb4f53731b37-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030211 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzgfj\" (UniqueName: \"kubernetes.io/projected/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-kube-api-access-mzgfj\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030250 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030290 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/77603536-1886-445b-b0e5-e0985948c137-proxy-tls\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030323 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-service-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030358 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g82z\" (UniqueName: \"kubernetes.io/projected/aef5966c-f9f0-4f49-b829-3c9f48b8420c-kube-api-access-7g82z\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030391 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/da458f3d-fa4b-40d1-b98b-903b4af624ce-service-ca-bundle\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030427 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-images\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030482 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-client\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030521 5032 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-config\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030524 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f63082de-7673-4f58-8f7e-4142b6efc221-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030554 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.030596 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbl4l\" (UniqueName: \"kubernetes.io/projected/11988f58-cabc-4d36-9b5c-242fe3b570d3-kube-api-access-tbl4l\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.031273 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.032706 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.532685271 +0000 UTC m=+139.344854342 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033079 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-service-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033364 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-node-bootstrap-token\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t6xb\" (UniqueName: \"kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033467 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-srv-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033501 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033533 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033567 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpf6h\" (UniqueName: \"kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033600 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2mjs7\" (UniqueName: \"kubernetes.io/projected/da458f3d-fa4b-40d1-b98b-903b4af624ce-kube-api-access-2mjs7\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033635 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11988f58-cabc-4d36-9b5c-242fe3b570d3-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.033768 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd10370-83a9-4daf-88db-e8506a3d4a08-trusted-ca\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.035007 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-cabundle\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036165 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-certs\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036352 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tl4d\" (UniqueName: \"kubernetes.io/projected/f63082de-7673-4f58-8f7e-4142b6efc221-kube-api-access-2tl4d\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036402 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-metrics-tls\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036435 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f54b6f41-be22-4926-abd2-6c390df2fb49-config-volume\") 
pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036466 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036500 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/abd10370-83a9-4daf-88db-e8506a3d4a08-metrics-tls\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036620 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d796c730-c411-400b-91a1-00149a3b408b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036658 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036700 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr9mc\" (UniqueName: \"kubernetes.io/projected/22d00a1b-b830-4b42-bc79-536d6e1f92e8-kube-api-access-hr9mc\") pod \"migrator-59844c95c7-wmgm5\" (UID: \"22d00a1b-b830-4b42-bc79-536d6e1f92e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036733 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036767 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbtsp\" (UniqueName: \"kubernetes.io/projected/3d9c8bba-0473-49f1-885c-0ae256709252-kube-api-access-zbtsp\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036812 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-stats-auth\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc 
kubenswrapper[5032]: I0122 16:07:15.036846 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/efbef706-f0b8-40d2-bbb1-517e5685dd30-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036909 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f63082de-7673-4f58-8f7e-4142b6efc221-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036941 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e7a95b4-7024-4049-b322-ccdd11835356-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.036994 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq8g7\" (UniqueName: \"kubernetes.io/projected/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-kube-api-access-xq8g7\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037030 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037063 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037091 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037126 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k5j4\" (UniqueName: \"kubernetes.io/projected/7271da0d-ff36-40b3-acfd-1d782e98db0f-kube-api-access-7k5j4\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037154 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037188 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz2cr\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-kube-api-access-sz2cr\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037216 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rqpg\" (UniqueName: \"kubernetes.io/projected/0a89d202-f652-48bc-a28e-82c7790dfe94-kube-api-access-9rqpg\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037288 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-config\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037316 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037345 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037381 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037409 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037440 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-plugins-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037472 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-metrics-certs\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037501 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-mountpoint-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037537 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037568 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7271da0d-ff36-40b3-acfd-1d782e98db0f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037598 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037631 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efbef706-f0b8-40d2-bbb1-517e5685dd30-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037659 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca\") pod \"console-f9d7485db-w7mns\" (UID: 
\"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037689 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a95b4-7024-4049-b322-ccdd11835356-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037719 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037751 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/77603536-1886-445b-b0e5-e0985948c137-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037782 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd8tm\" (UniqueName: \"kubernetes.io/projected/0a8119c9-c2f9-46d4-9dfc-942803871800-kube-api-access-dd8tm\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037809 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3d9c8bba-0473-49f1-885c-0ae256709252-machine-approver-tls\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037843 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssmrb\" (UniqueName: \"kubernetes.io/projected/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-kube-api-access-ssmrb\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037899 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvzhv\" (UniqueName: \"kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037946 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48cddb00-9308-423c-90df-9a0f52d6b6b3-serving-cert\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" 
Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.037976 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-socket-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038008 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038035 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-webhook-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038070 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj6lk\" (UniqueName: \"kubernetes.io/projected/f54b6f41-be22-4926-abd2-6c390df2fb49-kube-api-access-vj6lk\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038098 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038139 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038170 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-srv-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038203 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gmrz\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-kube-api-access-8gmrz\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038231 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-proxy-tls\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038261 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038293 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f54b6f41-be22-4926-abd2-6c390df2fb49-metrics-tls\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038329 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038369 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-auth-proxy-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038399 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038428 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-images\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038457 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-apiservice-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038507 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-csi-data-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " 
pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038540 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11988f58-cabc-4d36-9b5c-242fe3b570d3-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038570 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ttc7\" (UniqueName: \"kubernetes.io/projected/48cddb00-9308-423c-90df-9a0f52d6b6b3-kube-api-access-4ttc7\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038605 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-serving-cert\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038636 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038670 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d796c730-c411-400b-91a1-00149a3b408b-config\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038698 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-registration-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038747 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d796c730-c411-400b-91a1-00149a3b408b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038793 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-default-certificate\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 
16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.038825 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.039595 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11988f58-cabc-4d36-9b5c-242fe3b570d3-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.040625 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.040806 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/da458f3d-fa4b-40d1-b98b-903b4af624ce-service-ca-bundle\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.041046 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48cddb00-9308-423c-90df-9a0f52d6b6b3-config\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.041349 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-config\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.041527 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.042579 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/77603536-1886-445b-b0e5-e0985948c137-proxy-tls\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.042792 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efbef706-f0b8-40d2-bbb1-517e5685dd30-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.042944 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-images\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.043205 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5vrp\" (UniqueName: \"kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.043933 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f8efa593-e2f5-475f-b7ff-cb4f53731b37-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.044249 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-ca\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.045359 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.048119 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.073916 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d9c8bba-0473-49f1-885c-0ae256709252-auth-proxy-config\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.074039 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 
16:07:15.075020 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a95b4-7024-4049-b322-ccdd11835356-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.075078 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-images\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.076423 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.080376 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.081374 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-apiservice-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.081427 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f63082de-7673-4f58-8f7e-4142b6efc221-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.082266 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.082486 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfe97498-e7f6-4e19-ad51-65c34957b415-config\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.082929 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert\") pod 
\"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.083549 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.083556 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.083981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/abd10370-83a9-4daf-88db-e8506a3d4a08-metrics-tls\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.086004 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.086134 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a8119c9-c2f9-46d4-9dfc-942803871800-webhook-cert\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.086849 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-srv-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.088739 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-etcd-client\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.089758 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aef5966c-f9f0-4f49-b829-3c9f48b8420c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.090539 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-srv-cert\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.090718 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.091064 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.091813 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d796c730-c411-400b-91a1-00149a3b408b-config\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.091825 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/dfe97498-e7f6-4e19-ad51-65c34957b415-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.092140 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.092400 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/705472d6-e8cd-483c-b315-b3a3df1da2a3-signing-key\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.092773 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.093693 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: 
\"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.094478 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.100299 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7271da0d-ff36-40b3-acfd-1d782e98db0f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.100619 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qhrqx"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.101018 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a95b4-7024-4049-b322-ccdd11835356-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.101444 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.102390 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/77603536-1886-445b-b0e5-e0985948c137-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.103092 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-thv97"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.105256 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.106623 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 
16:07:15.108021 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.108120 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wp2s\" (UniqueName: \"kubernetes.io/projected/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-kube-api-access-4wp2s\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.109793 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-metrics-certs\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.109945 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/efbef706-f0b8-40d2-bbb1-517e5685dd30-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.110268 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a35d7f0c-719a-4d9b-8c0f-da8107cb44c1-proxy-tls\") pod \"machine-config-operator-74547568cd-6qrmc\" (UID: \"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.110749 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3d9c8bba-0473-49f1-885c-0ae256709252-machine-approver-tls\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.110750 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11988f58-cabc-4d36-9b5c-242fe3b570d3-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.111329 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.112825 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a89d202-f652-48bc-a28e-82c7790dfe94-serving-cert\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.112966 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.113042 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-stats-auth\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.113756 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/da458f3d-fa4b-40d1-b98b-903b4af624ce-default-certificate\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.115155 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d796c730-c411-400b-91a1-00149a3b408b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.120991 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.121706 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.122247 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48cddb00-9308-423c-90df-9a0f52d6b6b3-serving-cert\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.126238 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.126242 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.127430 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-metrics-tls\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.127824 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.129548 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffggr\" (UniqueName: \"kubernetes.io/projected/705472d6-e8cd-483c-b315-b3a3df1da2a3-kube-api-access-ffggr\") pod \"service-ca-9c57cc56f-fdtwm\" (UID: \"705472d6-e8cd-483c-b315-b3a3df1da2a3\") " pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.130218 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgq28\" (UniqueName: \"kubernetes.io/projected/f8efa593-e2f5-475f-b7ff-cb4f53731b37-kube-api-access-fgq28\") pod \"multus-admission-controller-857f4d67dd-627th\" (UID: \"f8efa593-e2f5-475f-b7ff-cb4f53731b37\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.131068 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.136431 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbl4l\" (UniqueName: \"kubernetes.io/projected/11988f58-cabc-4d36-9b5c-242fe3b570d3-kube-api-access-tbl4l\") pod \"openshift-apiserver-operator-796bbdcf4f-f8bsv\" (UID: \"11988f58-cabc-4d36-9b5c-242fe3b570d3\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.136647 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.137471 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mwxjh"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.140122 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lqs2\" (UniqueName: \"kubernetes.io/projected/77603536-1886-445b-b0e5-e0985948c137-kube-api-access-9lqs2\") pod \"machine-config-controller-84d6567774-fcjnd\" (UID: \"77603536-1886-445b-b0e5-e0985948c137\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.146705 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.146940 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-node-bootstrap-token\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.146986 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-certs\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147013 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f54b6f41-be22-4926-abd2-6c390df2fb49-config-volume\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147085 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-plugins-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147104 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-mountpoint-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147149 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-socket-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147165 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj6lk\" (UniqueName: \"kubernetes.io/projected/f54b6f41-be22-4926-abd2-6c390df2fb49-kube-api-access-vj6lk\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147200 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f54b6f41-be22-4926-abd2-6c390df2fb49-metrics-tls\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147218 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-csi-data-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147242 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-registration-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147280 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4e04e48-8670-4b7a-9c72-e89c34bc835f-cert\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147297 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jj8g\" (UniqueName: \"kubernetes.io/projected/e4e04e48-8670-4b7a-9c72-e89c34bc835f-kube-api-access-2jj8g\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147336 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82jxc\" (UniqueName: \"kubernetes.io/projected/6ee38646-5872-444d-ba8c-32b92ca36fff-kube-api-access-82jxc\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147387 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g2lp\" (UniqueName: \"kubernetes.io/projected/5350f539-bc65-4421-b3c2-cf7dc0d52e98-kube-api-access-7g2lp\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147555 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hppgr"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.147898 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-socket-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " 
pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.148018 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.647993244 +0000 UTC m=+139.460162275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.150967 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4e04e48-8670-4b7a-9c72-e89c34bc835f-cert\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.151083 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-csi-data-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.151132 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-registration-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.151186 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f54b6f41-be22-4926-abd2-6c390df2fb49-metrics-tls\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.151182 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-plugins-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.151227 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5350f539-bc65-4421-b3c2-cf7dc0d52e98-mountpoint-dir\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.152439 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.152798 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f54b6f41-be22-4926-abd2-6c390df2fb49-config-volume\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.154520 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-node-bootstrap-token\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.155327 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6ee38646-5872-444d-ba8c-32b92ca36fff-certs\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.160002 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzgfj\" (UniqueName: \"kubernetes.io/projected/e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a-kube-api-access-mzgfj\") pod \"control-plane-machine-set-operator-78cbb6b69f-qrkp4\" (UID: \"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.160555 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.177184 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t6xb\" (UniqueName: \"kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb\") pod \"marketplace-operator-79b997595-rsck8\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.194935 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpf6h\" (UniqueName: \"kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h\") pod \"collect-profiles-29484960-qpvc5\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.211850 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.215239 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mjs7\" (UniqueName: \"kubernetes.io/projected/da458f3d-fa4b-40d1-b98b-903b4af624ce-kube-api-access-2mjs7\") pod \"router-default-5444994796-4tdkk\" (UID: \"da458f3d-fa4b-40d1-b98b-903b4af624ce\") " pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.220871 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.234841 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2g76\" (UniqueName: \"kubernetes.io/projected/dfe97498-e7f6-4e19-ad51-65c34957b415-kube-api-access-z2g76\") pod \"machine-api-operator-5694c8668f-fthxg\" (UID: \"dfe97498-e7f6-4e19-ad51-65c34957b415\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.246731 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.249058 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.249283 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.249655 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.749632044 +0000 UTC m=+139.561801075 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.258727 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g82z\" (UniqueName: \"kubernetes.io/projected/aef5966c-f9f0-4f49-b829-3c9f48b8420c-kube-api-access-7g82z\") pod \"olm-operator-6b444d44fb-zt7sw\" (UID: \"aef5966c-f9f0-4f49-b829-3c9f48b8420c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.277645 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz2cr\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-kube-api-access-sz2cr\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.278464 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v7spn"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.298977 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5vrp\" (UniqueName: \"kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp\") pod \"oauth-openshift-558db77b4-db6bl\" (UID: 
\"6ed09764-a5a8-450e-8592-a4e487efecc6\") " pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.317873 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbtsp\" (UniqueName: \"kubernetes.io/projected/3d9c8bba-0473-49f1-885c-0ae256709252-kube-api-access-zbtsp\") pod \"machine-approver-56656f9798-g59x2\" (UID: \"3d9c8bba-0473-49f1-885c-0ae256709252\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.333152 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k5j4\" (UniqueName: \"kubernetes.io/projected/7271da0d-ff36-40b3-acfd-1d782e98db0f-kube-api-access-7k5j4\") pod \"package-server-manager-789f6589d5-fmph6\" (UID: \"7271da0d-ff36-40b3-acfd-1d782e98db0f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:15 crc kubenswrapper[5032]: W0122 16:07:15.343069 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f8a4120_f6d5_4b4d_824a_77d4bf61ea9a.slice/crio-abc2be5f96a085e68cc0f440fe895e65db172035dc9828b94abd05045f6de53e WatchSource:0}: Error finding container abc2be5f96a085e68cc0f440fe895e65db172035dc9828b94abd05045f6de53e: Status 404 returned error can't find the container with id abc2be5f96a085e68cc0f440fe895e65db172035dc9828b94abd05045f6de53e Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.350326 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.350435 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.850418374 +0000 UTC m=+139.662587405 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.350810 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.351351 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-22 16:07:15.851340516 +0000 UTC m=+139.663509557 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.358050 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.359338 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tl4d\" (UniqueName: \"kubernetes.io/projected/f63082de-7673-4f58-8f7e-4142b6efc221-kube-api-access-2tl4d\") pod \"kube-storage-version-migrator-operator-b67b599dd-scxq9\" (UID: \"f63082de-7673-4f58-8f7e-4142b6efc221\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.380503 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: W0122 16:07:15.402505 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda458f3d_fa4b_40d1_b98b_903b4af624ce.slice/crio-12dec69845f0ca5a2fabccd33635933f6b78d0cf4c3d044ee2c7553c478a2e5e WatchSource:0}: Error finding container 12dec69845f0ca5a2fabccd33635933f6b78d0cf4c3d044ee2c7553c478a2e5e: Status 404 returned error can't find the container with id 12dec69845f0ca5a2fabccd33635933f6b78d0cf4c3d044ee2c7553c478a2e5e Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.409256 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.414186 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.423941 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.425486 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e7a95b4-7024-4049-b322-ccdd11835356-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-klx24\" (UID: \"1e7a95b4-7024-4049-b322-ccdd11835356\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.433289 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.433973 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.438650 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.441391 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" event={"ID":"3c22e380-d63b-4d02-a3a5-2e241c71fc16","Type":"ContainerStarted","Data":"8c97db7ff086a6ff2cb290e53699fdab76a75405b9a0e357f8f9f47ae108469a"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.442593 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr9mc\" (UniqueName: \"kubernetes.io/projected/22d00a1b-b830-4b42-bc79-536d6e1f92e8-kube-api-access-hr9mc\") pod \"migrator-59844c95c7-wmgm5\" (UID: \"22d00a1b-b830-4b42-bc79-536d6e1f92e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.445105 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" event={"ID":"632f2319-d263-4e72-88ea-76f60ee7121e","Type":"ContainerStarted","Data":"14684f7f0e95f4e0499c223125ce27ac76331b5202fc7b3ffde04542dfe4955d"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.446288 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.450258 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4tdkk" event={"ID":"da458f3d-fa4b-40d1-b98b-903b4af624ce","Type":"ContainerStarted","Data":"12dec69845f0ca5a2fabccd33635933f6b78d0cf4c3d044ee2c7553c478a2e5e"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.451837 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.452251 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:15.952233148 +0000 UTC m=+139.764402179 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.453061 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" event={"ID":"5de5146a-a4b0-4f38-963c-1d9c101ec864","Type":"ContainerStarted","Data":"074e015706ffa51e871392e50787156a594dd1940ed2110c4c7792974974bbe7"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.461461 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/abd10370-83a9-4daf-88db-e8506a3d4a08-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ht7fx\" (UID: \"abd10370-83a9-4daf-88db-e8506a3d4a08\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.480296 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" event={"ID":"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a","Type":"ContainerStarted","Data":"abc2be5f96a085e68cc0f440fe895e65db172035dc9828b94abd05045f6de53e"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.480664 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.484100 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gmrz\" (UniqueName: \"kubernetes.io/projected/efbef706-f0b8-40d2-bbb1-517e5685dd30-kube-api-access-8gmrz\") pod \"cluster-image-registry-operator-dc59b4c8b-qcblh\" (UID: \"efbef706-f0b8-40d2-bbb1-517e5685dd30\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.486088 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" event={"ID":"e7d624b9-2fed-4924-90f5-9918050b8074","Type":"ContainerStarted","Data":"03f48fba8bc7765ba024ddd9bba1f4a701eb634e2d0838019962af1c32fe34f1"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.488299 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" event={"ID":"a6acdcf7-347a-42db-83a3-e5fc2bde8daa","Type":"ContainerStarted","Data":"91f242f8425f0e36cd510dddb1e370e0eb57caa9f870acc1952b8556329759d2"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.490124 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hppgr" event={"ID":"8b534bfa-1e36-4825-bbdb-aec579b973f8","Type":"ContainerStarted","Data":"722d0cbd2c78d8f0e8cc48c949a8ce9ced7e41188d381bab1c737b42a441637e"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.493621 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" 
event={"ID":"52d51718-aef8-4657-859f-aae6bb36d879","Type":"ContainerStarted","Data":"d9609d00d0ba1a2230756727daf3632e2a93d7827316c19ac0b12ae8bfb08500"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.493669 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.497168 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" event={"ID":"6b35b27c-b438-4743-b8e3-ff4e510c497d","Type":"ContainerStarted","Data":"0453aff77a0787f568ac5bbbb381d5a9593409f2148d23ef12d73219e27674d6"} Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.504304 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.504534 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d796c730-c411-400b-91a1-00149a3b408b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-h22sw\" (UID: \"d796c730-c411-400b-91a1-00149a3b408b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.525971 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.527426 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ttc7\" (UniqueName: \"kubernetes.io/projected/48cddb00-9308-423c-90df-9a0f52d6b6b3-kube-api-access-4ttc7\") pod \"service-ca-operator-777779d784-48r96\" (UID: \"48cddb00-9308-423c-90df-9a0f52d6b6b3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.536120 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvzhv\" (UniqueName: \"kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv\") pod \"console-f9d7485db-w7mns\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.553808 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.554236 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.054218866 +0000 UTC m=+139.866387897 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.561786 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssmrb\" (UniqueName: \"kubernetes.io/projected/5487a4a3-2a12-432d-8631-ca5dbeaaaf7f-kube-api-access-ssmrb\") pod \"catalog-operator-68c6474976-mt7xg\" (UID: \"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.586028 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.597475 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq8g7\" (UniqueName: \"kubernetes.io/projected/85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900-kube-api-access-xq8g7\") pod \"dns-operator-744455d44c-mrmfl\" (UID: \"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900\") " pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.606892 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-fdtwm"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.611497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rqpg\" (UniqueName: \"kubernetes.io/projected/0a89d202-f652-48bc-a28e-82c7790dfe94-kube-api-access-9rqpg\") pod \"etcd-operator-b45778765-dqp7n\" (UID: \"0a89d202-f652-48bc-a28e-82c7790dfe94\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.614053 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd8tm\" (UniqueName: \"kubernetes.io/projected/0a8119c9-c2f9-46d4-9dfc-942803871800-kube-api-access-dd8tm\") pod \"packageserver-d55dfcdfc-6psnz\" (UID: \"0a8119c9-c2f9-46d4-9dfc-942803871800\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.638762 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g2lp\" (UniqueName: \"kubernetes.io/projected/5350f539-bc65-4421-b3c2-cf7dc0d52e98-kube-api-access-7g2lp\") pod \"csi-hostpathplugin-xdxqs\" (UID: \"5350f539-bc65-4421-b3c2-cf7dc0d52e98\") " pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.639961 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.653219 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.654511 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.654766 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.154733549 +0000 UTC m=+139.966902580 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.654843 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.655219 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.155212941 +0000 UTC m=+139.967381972 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.656277 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj6lk\" (UniqueName: \"kubernetes.io/projected/f54b6f41-be22-4926-abd2-6c390df2fb49-kube-api-access-vj6lk\") pod \"dns-default-p6hqk\" (UID: \"f54b6f41-be22-4926-abd2-6c390df2fb49\") " pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.666431 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.674360 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jj8g\" (UniqueName: \"kubernetes.io/projected/e4e04e48-8670-4b7a-9c72-e89c34bc835f-kube-api-access-2jj8g\") pod \"ingress-canary-vdmbf\" (UID: \"e4e04e48-8670-4b7a-9c72-e89c34bc835f\") " pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.691930 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.694450 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82jxc\" (UniqueName: \"kubernetes.io/projected/6ee38646-5872-444d-ba8c-32b92ca36fff-kube-api-access-82jxc\") pod \"machine-config-server-dq8vl\" (UID: \"6ee38646-5872-444d-ba8c-32b92ca36fff\") " pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.695281 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.702103 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.752733 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc"] Jan 22 16:07:15 crc kubenswrapper[5032]: W0122 16:07:15.755106 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod705472d6_e8cd_483c_b315_b3a3df1da2a3.slice/crio-b959cfe72dde0766233f6b1f2c7ed516d95e0f23eeedea39d0c46e5be3e7ebd4 WatchSource:0}: Error finding container b959cfe72dde0766233f6b1f2c7ed516d95e0f23eeedea39d0c46e5be3e7ebd4: Status 404 returned error can't find the container with id b959cfe72dde0766233f6b1f2c7ed516d95e0f23eeedea39d0c46e5be3e7ebd4 Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.756056 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.756212 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.256184013 +0000 UTC m=+140.068353034 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.756444 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.756840 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.256821239 +0000 UTC m=+140.068990270 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.766256 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.772905 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.802571 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.839494 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dq8vl" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.840581 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.859705 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.861331 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.861906 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.36013976 +0000 UTC m=+140.172308791 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.862490 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.883625 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-vdmbf" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.904769 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" Jan 22 16:07:15 crc kubenswrapper[5032]: I0122 16:07:15.962145 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:15 crc kubenswrapper[5032]: E0122 16:07:15.962784 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.462766374 +0000 UTC m=+140.274935405 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.002016 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4"] Jan 22 16:07:16 crc kubenswrapper[5032]: W0122 16:07:16.012602 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc01acdfa_c0a1_46bb_a57e_e616540daeaa.slice/crio-41e787b8fb078563ec06d809f1251d90e8580da5ef63515ce30fdd15b0a654c8 WatchSource:0}: Error finding container 41e787b8fb078563ec06d809f1251d90e8580da5ef63515ce30fdd15b0a654c8: Status 404 returned error can't find the container with id 41e787b8fb078563ec06d809f1251d90e8580da5ef63515ce30fdd15b0a654c8 Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.024427 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.063151 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.063539 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.563525332 +0000 UTC m=+140.375694363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.164760 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.165400 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.665387548 +0000 UTC m=+140.477556579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.239318 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.265956 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.266121 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.766094195 +0000 UTC m=+140.578263226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.266232 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.266622 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.766606478 +0000 UTC m=+140.578775509 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.271561 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.367446 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.367831 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.867815317 +0000 UTC m=+140.679984348 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.472764 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.473353 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:16.973339202 +0000 UTC m=+140.785508233 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.504028 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-627th"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.543976 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" event={"ID":"6b35b27c-b438-4743-b8e3-ff4e510c497d","Type":"ContainerStarted","Data":"734abae5f63141632288d381b23cc9e8a57897b76662b61012d090975ab25cbf"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.558458 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.564097 5032 generic.go:334] "Generic (PLEG): container finished" podID="632f2319-d263-4e72-88ea-76f60ee7121e" containerID="76ca5052ca1c09a25eb502384490693fb98049014b63eb550e40a7ebf811651b" exitCode=0 Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.564186 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" event={"ID":"632f2319-d263-4e72-88ea-76f60ee7121e","Type":"ContainerDied","Data":"76ca5052ca1c09a25eb502384490693fb98049014b63eb550e40a7ebf811651b"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.566033 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" event={"ID":"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1","Type":"ContainerStarted","Data":"e21b5e20cd57e92fc48610ebe8276a02e417949a6e4cacb1d9206da63ba30fa6"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.567391 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" event={"ID":"1e14d069-8a95-4e40-b130-f0c65506b423","Type":"ContainerStarted","Data":"1bbf239e86add940465c5ca44bbe8451270efba56000c4a6057ee60176a3e512"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.583728 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.584095 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.084079614 +0000 UTC m=+140.896248645 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.596795 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" event={"ID":"e7d624b9-2fed-4924-90f5-9918050b8074","Type":"ContainerStarted","Data":"933c953ad55f7b95359f17641a7ea6ab27867a4d46ba335d7fe3a2ebf5860613"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.627101 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" event={"ID":"52d51718-aef8-4657-859f-aae6bb36d879","Type":"ContainerStarted","Data":"d666875f70cf7775c4b85843ee5c2fdf7258074a001edc203eb08db19a03eefe"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.627814 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.628476 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.670734 5032 patch_prober.go:28] interesting pod/console-operator-58897d9998-qhrqx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.672381 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" podUID="6b35b27c-b438-4743-b8e3-ff4e510c497d" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.671693 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" event={"ID":"3d9c8bba-0473-49f1-885c-0ae256709252","Type":"ContainerStarted","Data":"06cb9d919896f272cd084e2279ac0d941a79617cb76de6ec50b4789b748de117"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.698031 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.700343 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.20032922 +0000 UTC m=+141.012498251 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.709530 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5"] Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.719793 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" event={"ID":"ceb9b499-7c79-4268-9e2c-6c65e8604713","Type":"ContainerStarted","Data":"ec1d453a90e6104ad49d00eff663b09186877a1f5fcdf4f1047bb5ff08f8c6b6"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.720528 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" event={"ID":"ceb9b499-7c79-4268-9e2c-6c65e8604713","Type":"ContainerStarted","Data":"6104f2853d02bd0dc2237389b33d9ef28adcc41a59b6ab6137cafa6ce694a1ea"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.761544 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" event={"ID":"705472d6-e8cd-483c-b315-b3a3df1da2a3","Type":"ContainerStarted","Data":"b959cfe72dde0766233f6b1f2c7ed516d95e0f23eeedea39d0c46e5be3e7ebd4"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.781064 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" event={"ID":"77603536-1886-445b-b0e5-e0985948c137","Type":"ContainerStarted","Data":"d49248cbae79f2b09681c874747f71342c3d0ac983a28bb9ac2e8670af1850b5"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.798924 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.799306 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.299290285 +0000 UTC m=+141.111459306 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.804303 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" event={"ID":"c01acdfa-c0a1-46bb-a57e-e616540daeaa","Type":"ContainerStarted","Data":"41e787b8fb078563ec06d809f1251d90e8580da5ef63515ce30fdd15b0a654c8"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.877611 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.901152 5032 generic.go:334] "Generic (PLEG): container finished" podID="a6acdcf7-347a-42db-83a3-e5fc2bde8daa" containerID="685d249d5b98332b8458b5c9a540fbeb9dbc1cf864ac35b0d0ed3f472f90e95b" exitCode=0 Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.901248 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" event={"ID":"a6acdcf7-347a-42db-83a3-e5fc2bde8daa","Type":"ContainerDied","Data":"685d249d5b98332b8458b5c9a540fbeb9dbc1cf864ac35b0d0ed3f472f90e95b"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.901772 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:16 crc kubenswrapper[5032]: E0122 16:07:16.902171 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.402157465 +0000 UTC m=+141.214326496 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.916838 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hppgr" event={"ID":"8b534bfa-1e36-4825-bbdb-aec579b973f8","Type":"ContainerStarted","Data":"07e7356e3bb3f87104cf3b904cfaae66566727ddff7a7fff707acbfa0efc98c7"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.917987 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.920247 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" event={"ID":"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a","Type":"ContainerStarted","Data":"d83c74f0a3b1602fc349ec78b10fbfe5b39a59f51c79e08fa8e82720599fc72c"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.922034 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" event={"ID":"fb8a5762-1a9d-4011-a049-592cd5f33244","Type":"ContainerStarted","Data":"057fd4b8d5b2d4fe940a1acaac77e2810031b08a6b637cc841c30ee9f56c9b6e"} Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.927411 5032 patch_prober.go:28] interesting pod/downloads-7954f5f757-hppgr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.927448 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hppgr" podUID="8b534bfa-1e36-4825-bbdb-aec579b973f8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 22 16:07:16 crc kubenswrapper[5032]: I0122 16:07:16.945252 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" event={"ID":"3c22e380-d63b-4d02-a3a5-2e241c71fc16","Type":"ContainerStarted","Data":"69f613662451382b0a7bf5ba76f04577bff4f77d9a19facd7a58b85d51db683b"} Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.003653 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" event={"ID":"5de5146a-a4b0-4f38-963c-1d9c101ec864","Type":"ContainerStarted","Data":"ef6ef6cf0a7171a49e0c50d73fc91c0c30dac3b491f26399da8cfa779febe8f0"} Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.004289 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.005543 5032 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.505528467 +0000 UTC m=+141.317697498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.119710 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.122436 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.622422709 +0000 UTC m=+141.434591740 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.227273 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.227457 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.727427692 +0000 UTC m=+141.539596723 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.227899 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.228225 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.728210781 +0000 UTC m=+141.540379812 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.285762 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-q7skp" podStartSLOduration=121.285740114 podStartE2EDuration="2m1.285740114s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.249189172 +0000 UTC m=+141.061358203" watchObservedRunningTime="2026-01-22 16:07:17.285740114 +0000 UTC m=+141.097909145" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.289286 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" podStartSLOduration=121.289281381 podStartE2EDuration="2m1.289281381s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.287524138 +0000 UTC m=+141.099693169" watchObservedRunningTime="2026-01-22 16:07:17.289281381 +0000 UTC m=+141.101450412" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.289644 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-db6bl"] Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.329347 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 
16:07:17.329966 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.829949813 +0000 UTC m=+141.642118844 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.335779 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-mwxjh" podStartSLOduration=121.335753195 podStartE2EDuration="2m1.335753195s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.333832218 +0000 UTC m=+141.146001249" watchObservedRunningTime="2026-01-22 16:07:17.335753195 +0000 UTC m=+141.147922226" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.430769 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.431163 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:17.931148802 +0000 UTC m=+141.743317833 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.473841 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" podStartSLOduration=120.473810013 podStartE2EDuration="2m0.473810013s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.470680327 +0000 UTC m=+141.282849368" watchObservedRunningTime="2026-01-22 16:07:17.473810013 +0000 UTC m=+141.285979044" Jan 22 16:07:17 crc kubenswrapper[5032]: W0122 16:07:17.518054 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ed09764_a5a8_450e_8592_a4e487efecc6.slice/crio-6563076053b81de010939ef4da32cc2501e45a77317d8f11e41756776d145176 WatchSource:0}: Error finding container 6563076053b81de010939ef4da32cc2501e45a77317d8f11e41756776d145176: Status 404 returned error can't find the container with id 6563076053b81de010939ef4da32cc2501e45a77317d8f11e41756776d145176 Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.532042 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.532479 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.032462574 +0000 UTC m=+141.844631605 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.598349 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-hppgr" podStartSLOduration=121.598328852 podStartE2EDuration="2m1.598328852s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.561011371 +0000 UTC m=+141.373180402" watchObservedRunningTime="2026-01-22 16:07:17.598328852 +0000 UTC m=+141.410497883" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.633518 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.633939 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.13392695 +0000 UTC m=+141.946095981 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.683945 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nqd5d" podStartSLOduration=121.68392539 podStartE2EDuration="2m1.68392539s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:17.678718243 +0000 UTC m=+141.490887274" watchObservedRunningTime="2026-01-22 16:07:17.68392539 +0000 UTC m=+141.496094421" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.734165 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.734633 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.234613747 +0000 UTC m=+142.046782778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.839997 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.840732 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.340716296 +0000 UTC m=+142.152885327 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.852670 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f8a4120_f6d5_4b4d_824a_77d4bf61ea9a.slice/crio-5b324de0f7816eec730bca95a7c0d1c5f6897afa3e05542ac7a7d96c21b6ab82.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:07:17 crc kubenswrapper[5032]: I0122 16:07:17.945683 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:17 crc kubenswrapper[5032]: E0122 16:07:17.946386 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.446359393 +0000 UTC m=+142.258528424 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.050202 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.050707 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.550692409 +0000 UTC m=+142.362861440 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.053525 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" event={"ID":"22d00a1b-b830-4b42-bc79-536d6e1f92e8","Type":"ContainerStarted","Data":"d2383b2adbe0c3db59ca34bc693c713be2e33f35df92c072a97a60696fcac757"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.087043 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" event={"ID":"c01acdfa-c0a1-46bb-a57e-e616540daeaa","Type":"ContainerStarted","Data":"03bd51209aaf84097f883217b2475d582e0a9e3e1fbf0df8e481c1a43298d51b"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.088275 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.116303 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.126201 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" podStartSLOduration=122.126171371 podStartE2EDuration="2m2.126171371s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.109494744 +0000 UTC m=+141.921663775" watchObservedRunningTime="2026-01-22 16:07:18.126171371 +0000 UTC m=+141.938340402" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.138674 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.151456 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.151777 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.651761465 +0000 UTC m=+142.463930496 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.163580 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.178975 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.179146 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" event={"ID":"fb8a5762-1a9d-4011-a049-592cd5f33244","Type":"ContainerStarted","Data":"af89b2f3a07b52d76cd04d87ad6aacb69443be78e0b12b6c5bb73fbddf14ab79"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.179577 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.184299 5032 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rsck8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" start-of-body= Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.184367 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.42:8080/healthz\": dial tcp 10.217.0.42:8080: connect: connection refused" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.195586 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.216850 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.218700 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-vdmbf"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.225231 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fthxg"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.250179 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dq8vl" event={"ID":"6ee38646-5872-444d-ba8c-32b92ca36fff","Type":"ContainerStarted","Data":"1bdd484852ed71cd10ad3916ccfc5ff6b2da7e817c9199afed3d322683694b79"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.250234 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dq8vl" 
event={"ID":"6ee38646-5872-444d-ba8c-32b92ca36fff","Type":"ContainerStarted","Data":"b34961f2ec095b5c40f309b0fe7e29a3c41574d114d7dd432aa763ffe83a0284"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.254120 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.257868 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" podStartSLOduration=121.257834933 podStartE2EDuration="2m1.257834933s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.248322551 +0000 UTC m=+142.060491582" watchObservedRunningTime="2026-01-22 16:07:18.257834933 +0000 UTC m=+142.070003964" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.259513 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dqp7n"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.259549 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6"] Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.260249 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.760234702 +0000 UTC m=+142.572403733 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.273011 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-mrmfl"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.310576 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" podStartSLOduration=121.31055836 podStartE2EDuration="2m1.31055836s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.284897234 +0000 UTC m=+142.097066265" watchObservedRunningTime="2026-01-22 16:07:18.31055836 +0000 UTC m=+142.122727391" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.310886 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-dq8vl" podStartSLOduration=6.310883248 podStartE2EDuration="6.310883248s" podCreationTimestamp="2026-01-22 16:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.310233842 +0000 UTC m=+142.122402873" watchObservedRunningTime="2026-01-22 16:07:18.310883248 +0000 UTC m=+142.123052279" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.320352 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" event={"ID":"6ed09764-a5a8-450e-8592-a4e487efecc6","Type":"ContainerStarted","Data":"6563076053b81de010939ef4da32cc2501e45a77317d8f11e41756776d145176"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.340117 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" podStartSLOduration=122.340096051 podStartE2EDuration="2m2.340096051s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.338286967 +0000 UTC m=+142.150455998" watchObservedRunningTime="2026-01-22 16:07:18.340096051 +0000 UTC m=+142.152265082" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.342853 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" event={"ID":"f8efa593-e2f5-475f-b7ff-cb4f53731b37","Type":"ContainerStarted","Data":"04867c6e0cd0d8d5270e1d29cc95e254ad46ac81ea99d434db5befe20e414792"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.348378 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" event={"ID":"f63082de-7673-4f58-8f7e-4142b6efc221","Type":"ContainerStarted","Data":"14309312c399123e18c3b595e6caf0fa51b57b37106d3413c0dbd729efb69fd7"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.353316 5032 generic.go:334] "Generic 
(PLEG): container finished" podID="5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a" containerID="5b324de0f7816eec730bca95a7c0d1c5f6897afa3e05542ac7a7d96c21b6ab82" exitCode=0 Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.353378 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" event={"ID":"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a","Type":"ContainerDied","Data":"5b324de0f7816eec730bca95a7c0d1c5f6897afa3e05542ac7a7d96c21b6ab82"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.360264 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.361429 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.861412771 +0000 UTC m=+142.673581802 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: W0122 16:07:18.371742 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabd10370_83a9_4daf_88db_e8506a3d4a08.slice/crio-b76986a2df069dd740b5c6a0841d6386652522ba6481a3c252484ed9cb29f156 WatchSource:0}: Error finding container b76986a2df069dd740b5c6a0841d6386652522ba6481a3c252484ed9cb29f156: Status 404 returned error can't find the container with id b76986a2df069dd740b5c6a0841d6386652522ba6481a3c252484ed9cb29f156 Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.387012 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" podStartSLOduration=122.386994225 podStartE2EDuration="2m2.386994225s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.378691372 +0000 UTC m=+142.190860403" watchObservedRunningTime="2026-01-22 16:07:18.386994225 +0000 UTC m=+142.199163246" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.388725 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-48r96"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.393827 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" event={"ID":"e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a","Type":"ContainerStarted","Data":"9b974f243ca4f6f6147d634ec5af98f87742051ad4155dc0bce7f6b905e583f3"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.417660 5032 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-console/console-f9d7485db-w7mns" event={"ID":"b3b48fcd-6341-4599-ae1e-f424e4a24f55","Type":"ContainerStarted","Data":"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.417705 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-w7mns" event={"ID":"b3b48fcd-6341-4599-ae1e-f424e4a24f55","Type":"ContainerStarted","Data":"1b194b15fcf312b4877c33f9a441b87ec87f0a76c3cc6b1d484216cfb79c9663"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.462069 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.464224 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:18.964211449 +0000 UTC m=+142.776380480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.531445 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qrkp4" podStartSLOduration=122.531421969 podStartE2EDuration="2m2.531421969s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.529358379 +0000 UTC m=+142.341527420" watchObservedRunningTime="2026-01-22 16:07:18.531421969 +0000 UTC m=+142.343591160" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554517 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" event={"ID":"77603536-1886-445b-b0e5-e0985948c137","Type":"ContainerStarted","Data":"348d7e34ee1be4ac35bb3d29e2a0388004cd9b4bcebfa7b1d428cc0e1cae5279"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554548 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4tdkk" event={"ID":"da458f3d-fa4b-40d1-b98b-903b4af624ce","Type":"ContainerStarted","Data":"d546dee7e20fcce728e018094c9d2a014af91628805f100f9f2d2af45e6d9e2c"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554568 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xdxqs"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554581 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554590 5032 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-dns/dns-default-p6hqk"] Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.554600 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh"] Jan 22 16:07:18 crc kubenswrapper[5032]: W0122 16:07:18.554671 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5350f539_bc65_4421_b3c2_cf7dc0d52e98.slice/crio-56f630ca7b65ab9f05fb6092ac723e838dd3b0e192305376b7572207c5b86426 WatchSource:0}: Error finding container 56f630ca7b65ab9f05fb6092ac723e838dd3b0e192305376b7572207c5b86426: Status 404 returned error can't find the container with id 56f630ca7b65ab9f05fb6092ac723e838dd3b0e192305376b7572207c5b86426 Jan 22 16:07:18 crc kubenswrapper[5032]: W0122 16:07:18.559161 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf54b6f41_be22_4926_abd2_6c390df2fb49.slice/crio-421ef555447aa524303353f32ec80f70cd27bdd0c0b09b61869593fae11a078a WatchSource:0}: Error finding container 421ef555447aa524303353f32ec80f70cd27bdd0c0b09b61869593fae11a078a: Status 404 returned error can't find the container with id 421ef555447aa524303353f32ec80f70cd27bdd0c0b09b61869593fae11a078a Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.563258 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.564486 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.064469595 +0000 UTC m=+142.876638626 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.570448 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" event={"ID":"1e7a95b4-7024-4049-b322-ccdd11835356","Type":"ContainerStarted","Data":"59ad96c86b011d2b6c4e96fa994a2ec19ba9fd6d362f2adf0a3caca450a4c7dd"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.596942 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-w7mns" podStartSLOduration=122.596915517 podStartE2EDuration="2m2.596915517s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.575804962 +0000 UTC m=+142.387973993" watchObservedRunningTime="2026-01-22 16:07:18.596915517 +0000 UTC m=+142.409084548" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.609260 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" event={"ID":"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1","Type":"ContainerStarted","Data":"b9e2223d94e62e8999c0939ad1480a70d102b922a5114f2c55cb7b98dd80a52b"} Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.620443 5032 patch_prober.go:28] interesting pod/downloads-7954f5f757-hppgr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.620490 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hppgr" podUID="8b534bfa-1e36-4825-bbdb-aec579b973f8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.637510 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-qhrqx" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.674803 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.676444 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.176430617 +0000 UTC m=+142.988599648 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.698135 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" podStartSLOduration=122.698113346 podStartE2EDuration="2m2.698113346s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.64664558 +0000 UTC m=+142.458814611" watchObservedRunningTime="2026-01-22 16:07:18.698113346 +0000 UTC m=+142.510282377" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.748101 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-4tdkk" podStartSLOduration=122.748080125 podStartE2EDuration="2m2.748080125s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.698894145 +0000 UTC m=+142.511063186" watchObservedRunningTime="2026-01-22 16:07:18.748080125 +0000 UTC m=+142.560249156" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.749186 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" podStartSLOduration=122.749181602 podStartE2EDuration="2m2.749181602s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:18.747339467 +0000 UTC m=+142.559508488" watchObservedRunningTime="2026-01-22 16:07:18.749181602 +0000 UTC m=+142.561350633" Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.775882 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.776628 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.276613552 +0000 UTC m=+143.088782583 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.877561 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.878309 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.378298043 +0000 UTC m=+143.190467074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.987165 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.987523 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.487502417 +0000 UTC m=+143.299671448 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:18 crc kubenswrapper[5032]: I0122 16:07:18.987580 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:18 crc kubenswrapper[5032]: E0122 16:07:18.988045 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.48802693 +0000 UTC m=+143.300195971 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.090691 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.091083 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.591066674 +0000 UTC m=+143.403235705 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.195337 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.195686 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.695673957 +0000 UTC m=+143.507842988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.227770 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.238655 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:19 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:19 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:19 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.239089 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.307713 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.308173 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.808155411 +0000 UTC m=+143.620324442 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.412648 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.413147 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:19.913132582 +0000 UTC m=+143.725301613 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.513795 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.513978 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.013953192 +0000 UTC m=+143.826122223 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.514088 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.514378 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.014365492 +0000 UTC m=+143.826534523 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.617668 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.618531 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.118497073 +0000 UTC m=+143.930666104 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.618804 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.619366 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.119353363 +0000 UTC m=+143.931522394 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.720513 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.720979 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.220962893 +0000 UTC m=+144.033131924 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.730880 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" event={"ID":"f8efa593-e2f5-475f-b7ff-cb4f53731b37","Type":"ContainerStarted","Data":"ec8b9a7850cd31ba50e2edd0526e8be4b306dbf42a8c5932b0a24ea7c724eb31"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.730918 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" event={"ID":"f8efa593-e2f5-475f-b7ff-cb4f53731b37","Type":"ContainerStarted","Data":"69253098a5a16037f52d6bbe143c2bb2d93d47366491ead1a8cbb1889d160dd5"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.764250 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-627th" podStartSLOduration=123.764227618 podStartE2EDuration="2m3.764227618s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:19.763289035 +0000 UTC m=+143.575458066" watchObservedRunningTime="2026-01-22 16:07:19.764227618 +0000 UTC m=+143.576396649" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.827734 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.828762 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.328743213 +0000 UTC m=+144.140912244 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.829245 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-scxq9" event={"ID":"f63082de-7673-4f58-8f7e-4142b6efc221","Type":"ContainerStarted","Data":"4929955f1cb408b7733a38058672852475296e403da8cd8d67f1149128135d5a"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.876435 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" event={"ID":"6ed09764-a5a8-450e-8592-a4e487efecc6","Type":"ContainerStarted","Data":"0034ac84dfebb3512ba4623425af990599deb3fe8246700a715286b51def682b"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.877174 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.899237 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" event={"ID":"aef5966c-f9f0-4f49-b829-3c9f48b8420c","Type":"ContainerStarted","Data":"58a88481ce83b8e5facf0dee008140a4c16835eb2d21b086ada353fc3f963ac3"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.899294 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" event={"ID":"aef5966c-f9f0-4f49-b829-3c9f48b8420c","Type":"ContainerStarted","Data":"d0773fe205633ccf39835e6ddf5436aed0b1cc55d5d4d8398b9c8cf5a9d23bac"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.900275 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.903674 5032 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-zt7sw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.903745 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" podUID="aef5966c-f9f0-4f49-b829-3c9f48b8420c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.924029 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" event={"ID":"5350f539-bc65-4421-b3c2-cf7dc0d52e98","Type":"ContainerStarted","Data":"56f630ca7b65ab9f05fb6092ac723e838dd3b0e192305376b7572207c5b86426"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.929337 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:19 crc kubenswrapper[5032]: E0122 16:07:19.930663 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.430646469 +0000 UTC m=+144.242815500 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.931060 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" event={"ID":"0a89d202-f652-48bc-a28e-82c7790dfe94","Type":"ContainerStarted","Data":"aa1a2d4b3588b8bcc5fa7d7c93c80f86ecf2c8ab3a40a7c08ac3c9efcc8a7ade"} Jan 22 16:07:19 crc kubenswrapper[5032]: I0122 16:07:19.988516 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" podStartSLOduration=123.98847992 podStartE2EDuration="2m3.98847992s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:19.954759367 +0000 UTC m=+143.766928398" watchObservedRunningTime="2026-01-22 16:07:19.98847992 +0000 UTC m=+143.800648951" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.022565 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" event={"ID":"632f2319-d263-4e72-88ea-76f60ee7121e","Type":"ContainerStarted","Data":"b9bbfec82cd8e5fc015b1503c36b82885989468aeae05636b7d0bf5dba8c18c6"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.023052 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" podStartSLOduration=123.023022403 podStartE2EDuration="2m3.023022403s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:19.989071745 +0000 UTC m=+143.801240786" watchObservedRunningTime="2026-01-22 16:07:20.023022403 +0000 UTC m=+143.835191434" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.032723 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.033991 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.53397201 +0000 UTC m=+144.346141031 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.044189 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" event={"ID":"ceb9b499-7c79-4268-9e2c-6c65e8604713","Type":"ContainerStarted","Data":"11a91c9ad3c7cb24ee662b4b88991013a131c18a4666880a507beaf1c805838c"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.071180 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-fdtwm" event={"ID":"705472d6-e8cd-483c-b315-b3a3df1da2a3","Type":"ContainerStarted","Data":"212e5ea6ff010e6100894b9672ddc52feef61d584bffc1d1801e5acfcfa987c3"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.079047 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" event={"ID":"48cddb00-9308-423c-90df-9a0f52d6b6b3","Type":"ContainerStarted","Data":"795e07c3977f92933c255f9052952d62f32d0cc1949cd94cb53217c5d1432742"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.082452 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" event={"ID":"7271da0d-ff36-40b3-acfd-1d782e98db0f","Type":"ContainerStarted","Data":"2ab44a63462b422e766c4347cd50f276afc1bff250357b643599e0827d0ee185"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.089848 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" podStartSLOduration=123.089835373 podStartE2EDuration="2m3.089835373s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.085404895 +0000 UTC m=+143.897573946" watchObservedRunningTime="2026-01-22 16:07:20.089835373 +0000 UTC m=+143.902004404" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.097250 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" event={"ID":"3d9c8bba-0473-49f1-885c-0ae256709252","Type":"ContainerStarted","Data":"4b0d217036d7c809182fb30f3367b5b775aeeb0ea2580fc5be98788730fb5260"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.115010 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" podStartSLOduration=123.114994097 podStartE2EDuration="2m3.114994097s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.112347913 +0000 UTC m=+143.924516944" watchObservedRunningTime="2026-01-22 16:07:20.114994097 +0000 UTC m=+143.927163128" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 
16:07:20.118125 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" event={"ID":"0a8119c9-c2f9-46d4-9dfc-942803871800","Type":"ContainerStarted","Data":"10efc00039202ba571e675b6c439ffbf47fc552a7b1da142f4f08e5ccd3df4d8"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.118168 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" event={"ID":"0a8119c9-c2f9-46d4-9dfc-942803871800","Type":"ContainerStarted","Data":"dfd3c4fcfa9edbf15a98159c1a3164311aa64884bf7b3b5b449ccce3f9352c9c"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.120944 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.134357 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.135305 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.635288762 +0000 UTC m=+144.447457793 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.141231 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g7h77" podStartSLOduration=124.141213177 podStartE2EDuration="2m4.141213177s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.140498999 +0000 UTC m=+143.952668030" watchObservedRunningTime="2026-01-22 16:07:20.141213177 +0000 UTC m=+143.953382198" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.142551 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6qrmc" event={"ID":"a35d7f0c-719a-4d9b-8c0f-da8107cb44c1","Type":"ContainerStarted","Data":"e15ffd97c29f675a56e7bc8916a6eb8e740e3e1e53ad4c2e33da596d0dc1be59"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.158532 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-p6hqk" event={"ID":"f54b6f41-be22-4926-abd2-6c390df2fb49","Type":"ContainerStarted","Data":"421ef555447aa524303353f32ec80f70cd27bdd0c0b09b61869593fae11a078a"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.163813 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" 
event={"ID":"abd10370-83a9-4daf-88db-e8506a3d4a08","Type":"ContainerStarted","Data":"4e3c845c9feee003bbabd9689290f30cb09ccc34df5f961fd4f2bf7b1d5c6c74"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.163879 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" event={"ID":"abd10370-83a9-4daf-88db-e8506a3d4a08","Type":"ContainerStarted","Data":"b76986a2df069dd740b5c6a0841d6386652522ba6481a3c252484ed9cb29f156"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.194981 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" event={"ID":"1e14d069-8a95-4e40-b130-f0c65506b423","Type":"ContainerStarted","Data":"8d0f81a8aa54f7c86ea0f7f31dd16b2d91ad14f313018c0990a5ffcce9ddf421"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.203633 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" event={"ID":"22d00a1b-b830-4b42-bc79-536d6e1f92e8","Type":"ContainerStarted","Data":"a980dd3a6ffe56bb0180e322064a8bd7d1e78c7f2d91ae4cabd5a023ed2aaf27"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.205392 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" podStartSLOduration=123.205380793 podStartE2EDuration="2m3.205380793s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.175678738 +0000 UTC m=+143.987847789" watchObservedRunningTime="2026-01-22 16:07:20.205380793 +0000 UTC m=+144.017549824" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.206284 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" podStartSLOduration=124.206278944 podStartE2EDuration="2m4.206278944s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.204965142 +0000 UTC m=+144.017134173" watchObservedRunningTime="2026-01-22 16:07:20.206278944 +0000 UTC m=+144.018447975" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.226161 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:20 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:20 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:20 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.226213 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.237887 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: 
\"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.239483 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.739470614 +0000 UTC m=+144.551639645 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.255137 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" event={"ID":"a6acdcf7-347a-42db-83a3-e5fc2bde8daa","Type":"ContainerStarted","Data":"0b5d19330a442b3b602d6c497e92a1301ac2ef20c37f96fe2b981a5f1f8bdf8f"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.255951 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.266400 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" event={"ID":"dfe97498-e7f6-4e19-ad51-65c34957b415","Type":"ContainerStarted","Data":"65b52dbf14e63a1fdf74ed6d729ee546d8f46a50cfa168a966354e7333369136"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.266433 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" event={"ID":"dfe97498-e7f6-4e19-ad51-65c34957b415","Type":"ContainerStarted","Data":"606f719f9799854ccadf1a04a713f43ae36734cab828bc72de27a930703e70fd"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.276734 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" podStartSLOduration=124.276715633 podStartE2EDuration="2m4.276715633s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.275210866 +0000 UTC m=+144.087379887" watchObservedRunningTime="2026-01-22 16:07:20.276715633 +0000 UTC m=+144.088884664" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.324748 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" podStartSLOduration=124.324727275 podStartE2EDuration="2m4.324727275s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.323405472 +0000 UTC m=+144.135574503" watchObservedRunningTime="2026-01-22 16:07:20.324727275 +0000 UTC m=+144.136896306" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.341514 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.342844 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.842826906 +0000 UTC m=+144.654995937 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.349465 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.398379 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-klx24" event={"ID":"1e7a95b4-7024-4049-b322-ccdd11835356","Type":"ContainerStarted","Data":"6436a6bd298c7bf81c78aad1a9166cde6b41c2e02af084c4063baa44984f595c"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.406340 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" event={"ID":"d796c730-c411-400b-91a1-00149a3b408b","Type":"ContainerStarted","Data":"6edf1f8206fc0c1ae9d3a5ff3ee886114d09951fe971a156d9924124af31c7e9"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.420929 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" event={"ID":"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f","Type":"ContainerStarted","Data":"c5114aa1402ca75e0edc3297583d58597e49545247e8d79f2dcbb0e7744769ef"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.421930 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.424972 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-vdmbf" event={"ID":"e4e04e48-8670-4b7a-9c72-e89c34bc835f","Type":"ContainerStarted","Data":"899cc20b0df90156f853392398dc8cbf6b69b8ed8dec3f64c58278af3d7cfd4c"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.428735 5032 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mt7xg container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.428792 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" podUID="5487a4a3-2a12-432d-8631-ca5dbeaaaf7f" containerName="catalog-operator" probeResult="failure" output="Get 
\"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.430220 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" event={"ID":"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900","Type":"ContainerStarted","Data":"3bc74073f18f7992301b3c92ef92b9a4d81f81c949339ff9412affcf38b0d31d"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.443751 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.444201 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:20.944172969 +0000 UTC m=+144.756342000 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.452680 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" podStartSLOduration=124.452655786 podStartE2EDuration="2m4.452655786s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.393214056 +0000 UTC m=+144.205383087" watchObservedRunningTime="2026-01-22 16:07:20.452655786 +0000 UTC m=+144.264824837" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.514614 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" event={"ID":"efbef706-f0b8-40d2-bbb1-517e5685dd30","Type":"ContainerStarted","Data":"144759ade02e8151468874b75af9e70238e4ee51d10156d477e74b15c4290e2d"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.518084 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-vdmbf" podStartSLOduration=8.518066512 podStartE2EDuration="8.518066512s" podCreationTimestamp="2026-01-22 16:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.499211372 +0000 UTC m=+144.311380403" watchObservedRunningTime="2026-01-22 16:07:20.518066512 +0000 UTC m=+144.330235543" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.533648 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" 
event={"ID":"11988f58-cabc-4d36-9b5c-242fe3b570d3","Type":"ContainerStarted","Data":"f592ab4f87f3e86de3040fb7b43a156a9e5fba368d597c499f47c96190c1ec91"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.537464 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" event={"ID":"77603536-1886-445b-b0e5-e0985948c137","Type":"ContainerStarted","Data":"2fc97725a80614a39ad9673249d9c0d6e682c151132cf553e791e29ed35baad4"} Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.542759 5032 patch_prober.go:28] interesting pod/downloads-7954f5f757-hppgr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.542808 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hppgr" podUID="8b534bfa-1e36-4825-bbdb-aec579b973f8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.544501 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.545342 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.045323407 +0000 UTC m=+144.857492428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.552227 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.650161 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.662772 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.162754462 +0000 UTC m=+144.974923493 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.672204 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" podStartSLOduration=123.672176432 podStartE2EDuration="2m3.672176432s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.644426205 +0000 UTC m=+144.456595246" watchObservedRunningTime="2026-01-22 16:07:20.672176432 +0000 UTC m=+144.484345463" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.728702 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcjnd" podStartSLOduration=124.728679451 podStartE2EDuration="2m4.728679451s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.718183315 +0000 UTC m=+144.530352356" watchObservedRunningTime="2026-01-22 16:07:20.728679451 +0000 UTC m=+144.540848482" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.751798 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.752203 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.252186395 +0000 UTC m=+145.064355426 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.855497 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.855911 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.355897375 +0000 UTC m=+145.168066406 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.856766 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" podStartSLOduration=124.856740556 podStartE2EDuration="2m4.856740556s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.844601949 +0000 UTC m=+144.656770980" watchObservedRunningTime="2026-01-22 16:07:20.856740556 +0000 UTC m=+144.668909587" Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.959078 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:20 crc kubenswrapper[5032]: E0122 16:07:20.959598 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.459584155 +0000 UTC m=+145.271753186 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:20 crc kubenswrapper[5032]: I0122 16:07:20.979171 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" podStartSLOduration=124.979146672 podStartE2EDuration="2m4.979146672s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:20.978367403 +0000 UTC m=+144.790536434" watchObservedRunningTime="2026-01-22 16:07:20.979146672 +0000 UTC m=+144.791315703" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.062430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.063345 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.563332067 +0000 UTC m=+145.375501098 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.118953 5032 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6psnz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.119035 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" podUID="0a8119c9-c2f9-46d4-9dfc-942803871800" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.166542 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.166870 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.666838142 +0000 UTC m=+145.479007173 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.227028 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:21 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:21 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:21 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.227089 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.268183 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.268565 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.768546584 +0000 UTC m=+145.580715615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.369588 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.370267 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.870252345 +0000 UTC m=+145.682421376 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.471524 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.471940 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:21.971927976 +0000 UTC m=+145.784097007 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.563571 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-vdmbf" event={"ID":"e4e04e48-8670-4b7a-9c72-e89c34bc835f","Type":"ContainerStarted","Data":"0a26c627e17fc2c0f1ec6f53bffa05ac8a4bac533f7e3bdfadbffb9839540112"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.572439 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.572794 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.072777287 +0000 UTC m=+145.884946318 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.587886 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" event={"ID":"d796c730-c411-400b-91a1-00149a3b408b","Type":"ContainerStarted","Data":"1dafbcafe72ee5feb48b0e8e735c1a0faa222409be743625df58c44549f02092"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.590657 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ht7fx" event={"ID":"abd10370-83a9-4daf-88db-e8506a3d4a08","Type":"ContainerStarted","Data":"ece1808bc179a7fd6f5b8acc91b41aa8ff8a6f7fce8a0b9599e9d77764aff9af"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.593948 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" event={"ID":"7271da0d-ff36-40b3-acfd-1d782e98db0f","Type":"ContainerStarted","Data":"8ade8baec93d642fd45f51cfc4b271d1f8eeb1e7ca1ff96299f54262e07c3165"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.593977 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" event={"ID":"7271da0d-ff36-40b3-acfd-1d782e98db0f","Type":"ContainerStarted","Data":"1cb1f681ce481f255184242fa118d7dc875ae1d9031fb56b1cda5e67db8c7897"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.594361 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.595402 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" event={"ID":"0a89d202-f652-48bc-a28e-82c7790dfe94","Type":"ContainerStarted","Data":"dd0862b0e6eb6aaf66b4b4fdb47f18fd92bc366c7f73e32df9a33c2964dde4d7"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.612333 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-h22sw" podStartSLOduration=125.612317762 podStartE2EDuration="2m5.612317762s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:21.610529148 +0000 UTC m=+145.422698179" watchObservedRunningTime="2026-01-22 16:07:21.612317762 +0000 UTC m=+145.424486793" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.616517 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" event={"ID":"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a","Type":"ContainerStarted","Data":"62b689195800dc3bbc1db3f175306c554f91ef56fd06d6d7dab741da53dc6f8b"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.616559 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" 
event={"ID":"5f8a4120-f6d5-4b4d-824a-77d4bf61ea9a","Type":"ContainerStarted","Data":"02bafe22f904a87d4e9afca8f56678c6abafd3c20303d8a705a1b5702b16418a"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.639173 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f8bsv" event={"ID":"11988f58-cabc-4d36-9b5c-242fe3b570d3","Type":"ContainerStarted","Data":"9353a6be731daf6a9a8678ad75d4428442acede7e31f5c9c8cd16027016a5cb5"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.673934 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.677990 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.177977314 +0000 UTC m=+145.990146345 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.679062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-p6hqk" event={"ID":"f54b6f41-be22-4926-abd2-6c390df2fb49","Type":"ContainerStarted","Data":"db5e49e9228ff27bf4caf99bed90401023a9e169c24af74d081077380632de40"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.679097 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-p6hqk" event={"ID":"f54b6f41-be22-4926-abd2-6c390df2fb49","Type":"ContainerStarted","Data":"bae7e0849f3b292ad643dd4f0aa02d06f77990f693b334674f86f7b77292c799"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.679526 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.700570 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fthxg" event={"ID":"dfe97498-e7f6-4e19-ad51-65c34957b415","Type":"ContainerStarted","Data":"e6940117f69eabff991ac16e9cfaca533fc09f080b2a67ac642576b1b982c205"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.701614 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" podStartSLOduration=124.70159628 podStartE2EDuration="2m4.70159628s" podCreationTimestamp="2026-01-22 16:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:21.701564409 +0000 UTC m=+145.513733440" watchObservedRunningTime="2026-01-22 16:07:21.70159628 +0000 UTC m=+145.513765311" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 
16:07:21.729844 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" event={"ID":"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900","Type":"ContainerStarted","Data":"cf138d6550de70ec19d84428535eb1a914dfce9f43ad1888bed6381cac97e5b4"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.729926 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" event={"ID":"85d5205a-c1cb-4ab7-b2a1-a4aa7ae01900","Type":"ContainerStarted","Data":"11d77ff38780b5e39a28b584f86bedc328ee4534b13136230a51b6bc2c0ed901"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.749388 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wmgm5" event={"ID":"22d00a1b-b830-4b42-bc79-536d6e1f92e8","Type":"ContainerStarted","Data":"cd0909361233e5c9e51a3afa82103e2aa10cc59c3f9f6df0f1e6b1c293e53236"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.761107 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-dqp7n" podStartSLOduration=125.761090182 podStartE2EDuration="2m5.761090182s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:21.759983405 +0000 UTC m=+145.572152446" watchObservedRunningTime="2026-01-22 16:07:21.761090182 +0000 UTC m=+145.573259213" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.777152 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-48r96" event={"ID":"48cddb00-9308-423c-90df-9a0f52d6b6b3","Type":"ContainerStarted","Data":"1067a7c8db58bf5b092d3fef175bf0f2a81a11d015f0dceb8cc080f17ea35224"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.777939 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.779294 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.279277296 +0000 UTC m=+146.091446327 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.815180 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" event={"ID":"3d9c8bba-0473-49f1-885c-0ae256709252","Type":"ContainerStarted","Data":"bf77df14c7e174ad0aa35b5e349b6648450c72c0f9d5c83787aefabcbdecb1ca"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.831493 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" event={"ID":"5487a4a3-2a12-432d-8631-ca5dbeaaaf7f","Type":"ContainerStarted","Data":"08c45e435c7d926656091ee2df3edd2bd8a7241fa1d93ddb9fe3fb49bde49436"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.831976 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-p6hqk" podStartSLOduration=9.831956411 podStartE2EDuration="9.831956411s" podCreationTimestamp="2026-01-22 16:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:21.829482091 +0000 UTC m=+145.641651122" watchObservedRunningTime="2026-01-22 16:07:21.831956411 +0000 UTC m=+145.644125442" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.850321 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mt7xg" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.857083 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" event={"ID":"5350f539-bc65-4421-b3c2-cf7dc0d52e98","Type":"ContainerStarted","Data":"95bf3cd709524c44c17b41f8931cd4f6a0f8317efdba0d681b100dd796ec0a8d"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.873096 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qcblh" event={"ID":"efbef706-f0b8-40d2-bbb1-517e5685dd30","Type":"ContainerStarted","Data":"64269d6530b858756be3bc64764296983a18b9cc66bafc7bc5861c4d30890de8"} Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.882099 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-thv97" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.882191 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zt7sw" Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.884279 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.886186 5032 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.386173904 +0000 UTC m=+146.198342935 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.985031 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.985244 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.48520242 +0000 UTC m=+146.297371451 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:21 crc kubenswrapper[5032]: I0122 16:07:21.988023 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:21 crc kubenswrapper[5032]: E0122 16:07:21.989110 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.489092085 +0000 UTC m=+146.301261116 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.092260 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.092528 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.592494488 +0000 UTC m=+146.404663519 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.092647 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.093057 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.593049082 +0000 UTC m=+146.405218113 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.194211 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.194632 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.69461629 +0000 UTC m=+146.506785321 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.195870 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6psnz" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.225813 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:22 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:22 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:22 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.225878 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.295544 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.295897 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.795883631 +0000 UTC m=+146.608052662 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.302558 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" podStartSLOduration=126.302542903 podStartE2EDuration="2m6.302542903s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:22.29914289 +0000 UTC m=+146.111311911" watchObservedRunningTime="2026-01-22 16:07:22.302542903 +0000 UTC m=+146.114711934" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.397132 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.397440 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.897425498 +0000 UTC m=+146.709594529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.444543 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-mrmfl" podStartSLOduration=126.444521407 podStartE2EDuration="2m6.444521407s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:22.381765106 +0000 UTC m=+146.193934137" watchObservedRunningTime="2026-01-22 16:07:22.444521407 +0000 UTC m=+146.256690438" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.499024 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.499433 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:22.999409767 +0000 UTC m=+146.811578798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.600080 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.600245 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.100217316 +0000 UTC m=+146.912386347 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.600331 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.600624 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.100610696 +0000 UTC m=+146.912779727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.701548 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.701871 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.201837756 +0000 UTC m=+147.014006787 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.771813 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g59x2" podStartSLOduration=127.771790733 podStartE2EDuration="2m7.771790733s" podCreationTimestamp="2026-01-22 16:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:22.770272536 +0000 UTC m=+146.582441567" watchObservedRunningTime="2026-01-22 16:07:22.771790733 +0000 UTC m=+146.583959764" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.794731 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.795649 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.803961 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.804346 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.304333827 +0000 UTC m=+147.116502858 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: W0122 16:07:22.804673 5032 reflector.go:561] object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g": failed to list *v1.Secret: secrets "certified-operators-dockercfg-4rs5g" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.804701 5032 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-4rs5g\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"certified-operators-dockercfg-4rs5g\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.837406 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.907738 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.907894 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.907958 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6m26\" (UniqueName: \"kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.908021 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:22 crc kubenswrapper[5032]: E0122 16:07:22.908139 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-22 16:07:23.408123158 +0000 UTC m=+147.220292189 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.928777 5032 csr.go:261] certificate signing request csr-plhjk is approved, waiting to be issued Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.929555 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" event={"ID":"5350f539-bc65-4421-b3c2-cf7dc0d52e98","Type":"ContainerStarted","Data":"15485d9b5e770ac1caf93fd6b46684d0dd33384d86cb83af846fb56bfdef6816"} Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.929585 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" event={"ID":"5350f539-bc65-4421-b3c2-cf7dc0d52e98","Type":"ContainerStarted","Data":"a382a8c385ff8866a39346b336399bd1a2ce00746459687b1879f0a33cbf0b15"} Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.941366 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.942268 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.943654 5032 csr.go:257] certificate signing request csr-plhjk is issued Jan 22 16:07:22 crc kubenswrapper[5032]: I0122 16:07:22.945078 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.009512 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.009641 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6m26\" (UniqueName: \"kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.010067 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.010148 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content\") pod 
\"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.024121 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.025067 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.025435 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.52541584 +0000 UTC m=+147.337584871 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.064489 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.086172 5032 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.102525 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6m26\" (UniqueName: \"kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26\") pod \"certified-operators-zj287\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.112637 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.112932 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.113154 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhfpt\" (UniqueName: 
\"kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.113275 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.113456 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.613428098 +0000 UTC m=+147.425597139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.128122 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.129945 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.138123 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214389 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qlnc\" (UniqueName: \"kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214442 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhfpt\" (UniqueName: \"kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214486 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214529 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities\") pod \"certified-operators-5f9qk\" (UID: 
\"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214545 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214571 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.214597 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.215091 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.215348 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.715336104 +0000 UTC m=+147.527505135 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.215438 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.225231 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:23 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:23 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:23 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.225270 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.238448 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhfpt\" (UniqueName: \"kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt\") pod \"community-operators-fx8tt\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.260773 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.315975 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.316198 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.816167395 +0000 UTC m=+147.628336426 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.316306 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qlnc\" (UniqueName: \"kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.316402 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.316430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.316484 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.316803 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.81678976 +0000 UTC m=+147.628958781 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.317044 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.317297 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.319155 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.320260 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.335689 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.361789 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qlnc\" (UniqueName: \"kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc\") pod \"certified-operators-5f9qk\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.417962 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.418464 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.418506 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.418530 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-fjncg\" (UniqueName: \"kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.418690 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:23.918670866 +0000 UTC m=+147.730839897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.519852 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.519936 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjncg\" (UniqueName: \"kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.520056 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.520090 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.520419 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.520559 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc 
kubenswrapper[5032]: E0122 16:07:23.520575 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.020553872 +0000 UTC m=+147.832722903 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ndwb9" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.534877 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.539633 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjncg\" (UniqueName: \"kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg\") pod \"community-operators-9p297\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: W0122 16:07:23.553237 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe241ed5_4a42_4178_8756_513449190b6b.slice/crio-795f383d6289ff4152bfb7dacd3d57c0dbc9c43f770b81e83ef064fa0113799d WatchSource:0}: Error finding container 795f383d6289ff4152bfb7dacd3d57c0dbc9c43f770b81e83ef064fa0113799d: Status 404 returned error can't find the container with id 795f383d6289ff4152bfb7dacd3d57c0dbc9c43f770b81e83ef064fa0113799d Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.621382 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:23 crc kubenswrapper[5032]: E0122 16:07:23.621679 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 16:07:24.121634848 +0000 UTC m=+147.933803879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.638658 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.708578 5032 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-22T16:07:23.086727616Z","Handler":null,"Name":""} Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.721653 5032 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.721696 5032 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.725825 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.730396 5032 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.730421 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.793883 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ndwb9\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.829524 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.844989 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.887820 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.914839 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.936690 5032 generic.go:334] "Generic (PLEG): container finished" podID="fe241ed5-4a42-4178-8756-513449190b6b" containerID="28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea" exitCode=0 Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.936979 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerDied","Data":"28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea"} Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.937074 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerStarted","Data":"795f383d6289ff4152bfb7dacd3d57c0dbc9c43f770b81e83ef064fa0113799d"} Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.939016 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.945377 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-22 16:02:22 +0000 UTC, rotation deadline is 2026-10-22 15:21:55.597341382 +0000 UTC Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.945412 5032 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6551h14m31.651932104s for next certificate rotation Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.947350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" event={"ID":"5350f539-bc65-4421-b3c2-cf7dc0d52e98","Type":"ContainerStarted","Data":"382869c43becd8e4ce26e09a8619338238ddcb2360f7f1b953d9644a1eea864e"} Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.949308 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerStarted","Data":"911edb85eb15ddd9256df5256762a766f5f46dcf3090e278093b538a0a2b175b"} Jan 22 16:07:23 crc kubenswrapper[5032]: I0122 16:07:23.978069 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-xdxqs" podStartSLOduration=11.978049624 podStartE2EDuration="11.978049624s" podCreationTimestamp="2026-01-22 16:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:23.975522233 +0000 UTC m=+147.787691264" watchObservedRunningTime="2026-01-22 16:07:23.978049624 +0000 UTC m=+147.790218655" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.129784 5032 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-marketplace/certified-operators-zj287" secret="" err="failed to sync secret cache: timed out waiting for the condition" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.130511 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.141045 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.227052 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:24 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:24 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:24 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.227126 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.368589 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.375920 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.391317 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.396420 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.397273 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.401011 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.401224 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.409198 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.442551 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.442596 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.444748 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.454248 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.463268 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.543773 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.543878 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.543917 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.543974 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.547769 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.548840 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.555365 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-hppgr" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.565418 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.565958 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.583295 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.613652 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:07:24 crc kubenswrapper[5032]: W0122 16:07:24.626502 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2abfe75_3d69_4043_add3_91011758e245.slice/crio-be9982c7d224e0285aff1f2f39d4129512ff0271ffe82dba87253e5b614909ae WatchSource:0}: Error finding container be9982c7d224e0285aff1f2f39d4129512ff0271ffe82dba87253e5b614909ae: Status 404 returned error can't find the container with id be9982c7d224e0285aff1f2f39d4129512ff0271ffe82dba87253e5b614909ae Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.630054 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.640332 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.644832 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.644909 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.646006 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.646263 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.664179 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.788649 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.919396 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.922480 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.924780 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.928185 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.936087 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.936122 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.953553 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.994495 5032 generic.go:334] "Generic (PLEG): container finished" podID="d2abfe75-3d69-4043-add3-91011758e245" containerID="007988985cf44a1dda214c6f0b47be1ed8b479670ce53ddce5feb531e8987cd7" exitCode=0 Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.994592 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerDied","Data":"007988985cf44a1dda214c6f0b47be1ed8b479670ce53ddce5feb531e8987cd7"} Jan 22 16:07:24 crc kubenswrapper[5032]: I0122 16:07:24.994622 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerStarted","Data":"be9982c7d224e0285aff1f2f39d4129512ff0271ffe82dba87253e5b614909ae"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.043359 5032 generic.go:334] "Generic (PLEG): container finished" podID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerID="bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c" exitCode=0 Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.043435 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerDied","Data":"bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.069260 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njpkr\" (UniqueName: \"kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.069325 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.069391 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.080529 5032 generic.go:334] "Generic (PLEG): container finished" podID="1e14d069-8a95-4e40-b130-f0c65506b423" containerID="8d0f81a8aa54f7c86ea0f7f31dd16b2d91ad14f313018c0990a5ffcce9ddf421" exitCode=0 Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.080603 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" event={"ID":"1e14d069-8a95-4e40-b130-f0c65506b423","Type":"ContainerDied","Data":"8d0f81a8aa54f7c86ea0f7f31dd16b2d91ad14f313018c0990a5ffcce9ddf421"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.089072 5032 generic.go:334] "Generic (PLEG): container finished" podID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerID="cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09" exitCode=0 Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.089133 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerDied","Data":"cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.089155 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerStarted","Data":"08409f0652c3c38ae55288ff5a0077362401bac7f76073f9070c485373423735"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.097354 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" event={"ID":"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4","Type":"ContainerStarted","Data":"3ac5e38c1d8b49cace9305ae211cfded3bc36c43805cb92478fcf5d5f8d50b6d"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.097394 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" event={"ID":"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4","Type":"ContainerStarted","Data":"5601b7894cfa24827bb2ccf67e807eef58bb6974d59624892564b35f52a6b484"} Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.105151 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wjs5l" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.113475 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-v7spn" Jan 22 16:07:25 crc kubenswrapper[5032]: W0122 16:07:25.129064 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-2baa4b75150abf5736d7d894000c356c74680f779480d51efe63241b9f2a911e WatchSource:0}: Error finding container 2baa4b75150abf5736d7d894000c356c74680f779480d51efe63241b9f2a911e: Status 404 returned error can't find the container with id 2baa4b75150abf5736d7d894000c356c74680f779480d51efe63241b9f2a911e Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.132076 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.177614 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njpkr\" (UniqueName: \"kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.177687 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.177729 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.178461 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.178514 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.211751 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njpkr\" (UniqueName: \"kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr\") pod \"redhat-marketplace-h6xjw\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.226573 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.250213 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.250559 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:25 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:25 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:25 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.250619 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.261258 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" podStartSLOduration=129.261222644 podStartE2EDuration="2m9.261222644s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:25.259516582 +0000 UTC m=+149.071685623" watchObservedRunningTime="2026-01-22 16:07:25.261222644 +0000 UTC m=+149.073391675" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.338706 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.340222 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.362771 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.488147 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbhd6\" (UniqueName: \"kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.488268 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.488294 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.590684 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.591200 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.591234 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbhd6\" (UniqueName: \"kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.592553 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.592647 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.621157 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-kbhd6\" (UniqueName: \"kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6\") pod \"redhat-marketplace-q67l2\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.666550 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.701725 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.701781 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.716017 5032 patch_prober.go:28] interesting pod/console-f9d7485db-w7mns container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.716117 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-w7mns" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.756315 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.923933 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.926598 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.934837 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:07:25 crc kubenswrapper[5032]: I0122 16:07:25.938159 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.020075 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.021457 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.023820 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlb6k\" (UniqueName: \"kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.023920 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.023997 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.024021 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.024064 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.024381 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.025167 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.025689 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125219 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125279 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125320 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125356 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlb6k\" (UniqueName: \"kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125376 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125907 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.125979 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.126389 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.132832 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b544ae8f3fb8bdecc8b102bbafa373bb1ea2250fc95f504a3bc6143e1c987777"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.132900 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"96efd19884f93a0b33a5a2da00f756fc2cc41236acbe2f13633b2693c61c46dc"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.137232 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"ab677bbb133117cdd32a3c4a8912f9e08571112d71deb7cabb047ff70da30d4f"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.137263 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e3868d30cfc4764c59de78e3384bada721c77e744066db206286dfe3d6196673"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.144838 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ac42af31777519c11fad62e16e211e46f89003f2677d1ee7a9b01e40528e644d"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.144899 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"2baa4b75150abf5736d7d894000c356c74680f779480d51efe63241b9f2a911e"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.145182 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.145388 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.146134 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlb6k\" (UniqueName: \"kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k\") pod \"redhat-operators-wlqfh\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.148696 5032 generic.go:334] "Generic (PLEG): container finished" podID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerID="59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3" exitCode=0 Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.148751 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerDied","Data":"59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.148770 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerStarted","Data":"e5e3f3ac4acea5c31b3c0f45e0bdb6657b7b88e1d528fa25c7ac6f9f7e009bdb"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.167343 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7e6e215e-2303-4663-925e-796ee811e341","Type":"ContainerStarted","Data":"7b7e61e216d1e2f97a954e384e2e8e2b981a0f0f15d3fdd19e159d3b881caeff"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.167401 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7e6e215e-2303-4663-925e-796ee811e341","Type":"ContainerStarted","Data":"763fdcc3e018554ec3c211db7aef278303ec07a7384fc92ee73eb1c4a8d10677"} Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.167594 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 
16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.235177 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.2351505879999998 podStartE2EDuration="2.235150588s" podCreationTimestamp="2026-01-22 16:07:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:26.226287181 +0000 UTC m=+150.038456212" watchObservedRunningTime="2026-01-22 16:07:26.235150588 +0000 UTC m=+150.047319629" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.235547 5032 patch_prober.go:28] interesting pod/router-default-5444994796-4tdkk container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 16:07:26 crc kubenswrapper[5032]: [-]has-synced failed: reason withheld Jan 22 16:07:26 crc kubenswrapper[5032]: [+]process-running ok Jan 22 16:07:26 crc kubenswrapper[5032]: healthz check failed Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.235617 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4tdkk" podUID="da458f3d-fa4b-40d1-b98b-903b4af624ce" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.271288 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.275297 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.323846 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.324913 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.342683 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.354917 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.434874 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.434943 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.434977 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7k6t\" (UniqueName: \"kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.536381 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.536473 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.536500 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7k6t\" (UniqueName: \"kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.537629 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.538009 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.566152 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-k7k6t\" (UniqueName: \"kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t\") pod \"redhat-operators-g7g9c\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.587571 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.690302 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.731089 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.747072 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume\") pod \"1e14d069-8a95-4e40-b130-f0c65506b423\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.747153 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume\") pod \"1e14d069-8a95-4e40-b130-f0c65506b423\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.747185 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpf6h\" (UniqueName: \"kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h\") pod \"1e14d069-8a95-4e40-b130-f0c65506b423\" (UID: \"1e14d069-8a95-4e40-b130-f0c65506b423\") " Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.754719 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume" (OuterVolumeSpecName: "config-volume") pod "1e14d069-8a95-4e40-b130-f0c65506b423" (UID: "1e14d069-8a95-4e40-b130-f0c65506b423"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.756293 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h" (OuterVolumeSpecName: "kube-api-access-rpf6h") pod "1e14d069-8a95-4e40-b130-f0c65506b423" (UID: "1e14d069-8a95-4e40-b130-f0c65506b423"). InnerVolumeSpecName "kube-api-access-rpf6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.757604 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1e14d069-8a95-4e40-b130-f0c65506b423" (UID: "1e14d069-8a95-4e40-b130-f0c65506b423"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.788117 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 22 16:07:26 crc kubenswrapper[5032]: W0122 16:07:26.834139 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd11bfc17_4e06_403a_8632_87b168d498cf.slice/crio-a038c7625ea45ee7d161afc50da23f8fb8023c16b28162bf208e26bd725e75b0 WatchSource:0}: Error finding container a038c7625ea45ee7d161afc50da23f8fb8023c16b28162bf208e26bd725e75b0: Status 404 returned error can't find the container with id a038c7625ea45ee7d161afc50da23f8fb8023c16b28162bf208e26bd725e75b0 Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.850997 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e14d069-8a95-4e40-b130-f0c65506b423-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.851031 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e14d069-8a95-4e40-b130-f0c65506b423-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:26 crc kubenswrapper[5032]: I0122 16:07:26.851040 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpf6h\" (UniqueName: \"kubernetes.io/projected/1e14d069-8a95-4e40-b130-f0c65506b423-kube-api-access-rpf6h\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:26 crc kubenswrapper[5032]: W0122 16:07:26.881454 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6386a09e_2963_4aec_b919_a1d387cdf851.slice/crio-970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6 WatchSource:0}: Error finding container 970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6: Status 404 returned error can't find the container with id 970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6 Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.224501 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.227681 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" event={"ID":"1e14d069-8a95-4e40-b130-f0c65506b423","Type":"ContainerDied","Data":"1bbf239e86add940465c5ca44bbe8451270efba56000c4a6057ee60176a3e512"} Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.227728 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bbf239e86add940465c5ca44bbe8451270efba56000c4a6057ee60176a3e512" Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.227838 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5" Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.232437 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.242693 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4tdkk" Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.249376 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerStarted","Data":"a038c7625ea45ee7d161afc50da23f8fb8023c16b28162bf208e26bd725e75b0"} Jan 22 16:07:27 crc kubenswrapper[5032]: W0122 16:07:27.258593 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda130b809_3234_4d66_a8b8_1fcd5651244b.slice/crio-50216c976e0c71543f20f7cfee9610229bfc3cfc4577764626e49a4350817de3 WatchSource:0}: Error finding container 50216c976e0c71543f20f7cfee9610229bfc3cfc4577764626e49a4350817de3: Status 404 returned error can't find the container with id 50216c976e0c71543f20f7cfee9610229bfc3cfc4577764626e49a4350817de3 Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.262732 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6386a09e-2963-4aec-b919-a1d387cdf851","Type":"ContainerStarted","Data":"970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6"} Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.281562 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerStarted","Data":"1927ed7df28762c8bd26578a9070284d73c465aaa0032f1dc867939ed61e140c"} Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.291846 5032 generic.go:334] "Generic (PLEG): container finished" podID="7e6e215e-2303-4663-925e-796ee811e341" containerID="7b7e61e216d1e2f97a954e384e2e8e2b981a0f0f15d3fdd19e159d3b881caeff" exitCode=0 Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.292203 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7e6e215e-2303-4663-925e-796ee811e341","Type":"ContainerDied","Data":"7b7e61e216d1e2f97a954e384e2e8e2b981a0f0f15d3fdd19e159d3b881caeff"} Jan 22 16:07:27 crc kubenswrapper[5032]: I0122 16:07:27.859973 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:07:28 crc kubenswrapper[5032]: E0122 16:07:28.032436 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd11bfc17_4e06_403a_8632_87b168d498cf.slice/crio-conmon-de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.322301 5032 generic.go:334] "Generic (PLEG): container finished" podID="833d74e1-afb3-429e-93c1-75904ea02347" containerID="8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769" exitCode=0 Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.322367 5032 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerDied","Data":"8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769"} Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.326537 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerStarted","Data":"50216c976e0c71543f20f7cfee9610229bfc3cfc4577764626e49a4350817de3"} Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.328302 5032 generic.go:334] "Generic (PLEG): container finished" podID="d11bfc17-4e06-403a-8632-87b168d498cf" containerID="de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04" exitCode=0 Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.328762 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerDied","Data":"de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04"} Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.608334 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.784922 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir\") pod \"7e6e215e-2303-4663-925e-796ee811e341\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.785025 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access\") pod \"7e6e215e-2303-4663-925e-796ee811e341\" (UID: \"7e6e215e-2303-4663-925e-796ee811e341\") " Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.785640 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7e6e215e-2303-4663-925e-796ee811e341" (UID: "7e6e215e-2303-4663-925e-796ee811e341"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.792904 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7e6e215e-2303-4663-925e-796ee811e341" (UID: "7e6e215e-2303-4663-925e-796ee811e341"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.886626 5032 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e6e215e-2303-4663-925e-796ee811e341-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:28 crc kubenswrapper[5032]: I0122 16:07:28.886660 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e6e215e-2303-4663-925e-796ee811e341-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:29 crc kubenswrapper[5032]: I0122 16:07:29.346774 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6386a09e-2963-4aec-b919-a1d387cdf851","Type":"ContainerStarted","Data":"2a88167590ec767a46fe08bbb54b4ce2d2bb74117ef0c8048de732f1fd27921d"} Jan 22 16:07:29 crc kubenswrapper[5032]: I0122 16:07:29.350101 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7e6e215e-2303-4663-925e-796ee811e341","Type":"ContainerDied","Data":"763fdcc3e018554ec3c211db7aef278303ec07a7384fc92ee73eb1c4a8d10677"} Jan 22 16:07:29 crc kubenswrapper[5032]: I0122 16:07:29.350166 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="763fdcc3e018554ec3c211db7aef278303ec07a7384fc92ee73eb1c4a8d10677" Jan 22 16:07:29 crc kubenswrapper[5032]: I0122 16:07:29.350626 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 16:07:29 crc kubenswrapper[5032]: I0122 16:07:29.356069 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerStarted","Data":"edda33e2f4fbfdf76c48363b5a076c851e94afe5ee931adb83fec8bd826cbdcd"} Jan 22 16:07:30 crc kubenswrapper[5032]: I0122 16:07:30.369688 5032 generic.go:334] "Generic (PLEG): container finished" podID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerID="edda33e2f4fbfdf76c48363b5a076c851e94afe5ee931adb83fec8bd826cbdcd" exitCode=0 Jan 22 16:07:30 crc kubenswrapper[5032]: I0122 16:07:30.369777 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerDied","Data":"edda33e2f4fbfdf76c48363b5a076c851e94afe5ee931adb83fec8bd826cbdcd"} Jan 22 16:07:30 crc kubenswrapper[5032]: I0122 16:07:30.845123 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-p6hqk" Jan 22 16:07:30 crc kubenswrapper[5032]: I0122 16:07:30.859280 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=5.859259594 podStartE2EDuration="5.859259594s" podCreationTimestamp="2026-01-22 16:07:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:07:30.403486653 +0000 UTC m=+154.215655674" watchObservedRunningTime="2026-01-22 16:07:30.859259594 +0000 UTC m=+154.671428625" Jan 22 16:07:31 crc kubenswrapper[5032]: I0122 16:07:31.381634 5032 generic.go:334] "Generic (PLEG): container finished" podID="6386a09e-2963-4aec-b919-a1d387cdf851" 
containerID="2a88167590ec767a46fe08bbb54b4ce2d2bb74117ef0c8048de732f1fd27921d" exitCode=0 Jan 22 16:07:31 crc kubenswrapper[5032]: I0122 16:07:31.381711 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6386a09e-2963-4aec-b919-a1d387cdf851","Type":"ContainerDied","Data":"2a88167590ec767a46fe08bbb54b4ce2d2bb74117ef0c8048de732f1fd27921d"} Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.680451 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.856793 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir\") pod \"6386a09e-2963-4aec-b919-a1d387cdf851\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.856877 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access\") pod \"6386a09e-2963-4aec-b919-a1d387cdf851\" (UID: \"6386a09e-2963-4aec-b919-a1d387cdf851\") " Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.856964 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6386a09e-2963-4aec-b919-a1d387cdf851" (UID: "6386a09e-2963-4aec-b919-a1d387cdf851"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.857178 5032 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6386a09e-2963-4aec-b919-a1d387cdf851-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.864089 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6386a09e-2963-4aec-b919-a1d387cdf851" (UID: "6386a09e-2963-4aec-b919-a1d387cdf851"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:07:32 crc kubenswrapper[5032]: I0122 16:07:32.958309 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6386a09e-2963-4aec-b919-a1d387cdf851-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:33 crc kubenswrapper[5032]: I0122 16:07:33.016724 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:07:33 crc kubenswrapper[5032]: I0122 16:07:33.016798 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:07:33 crc kubenswrapper[5032]: I0122 16:07:33.399464 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6386a09e-2963-4aec-b919-a1d387cdf851","Type":"ContainerDied","Data":"970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6"} Jan 22 16:07:33 crc kubenswrapper[5032]: I0122 16:07:33.399953 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="970c388cae733fb930826785e624e57faa3a8b53aea3f1e30876130bdc050de6" Jan 22 16:07:33 crc kubenswrapper[5032]: I0122 16:07:33.399531 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 16:07:35 crc kubenswrapper[5032]: I0122 16:07:35.780552 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:35 crc kubenswrapper[5032]: I0122 16:07:35.785941 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:07:37 crc kubenswrapper[5032]: I0122 16:07:37.791602 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:07:37 crc kubenswrapper[5032]: I0122 16:07:37.792164 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" containerID="cri-o://03bd51209aaf84097f883217b2475d582e0a9e3e1fbf0df8e481c1a43298d51b" gracePeriod=30 Jan 22 16:07:37 crc kubenswrapper[5032]: I0122 16:07:37.819915 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:37 crc kubenswrapper[5032]: I0122 16:07:37.820151 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" containerID="cri-o://d666875f70cf7775c4b85843ee5c2fdf7258074a001edc203eb08db19a03eefe" gracePeriod=30 Jan 22 16:07:38 crc kubenswrapper[5032]: I0122 16:07:38.841982 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:38 crc kubenswrapper[5032]: I0122 16:07:38.849427 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2-metrics-certs\") pod \"network-metrics-daemon-95g6b\" (UID: \"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2\") " pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:39 crc kubenswrapper[5032]: I0122 16:07:39.034326 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-95g6b" Jan 22 16:07:39 crc kubenswrapper[5032]: I0122 16:07:39.436594 5032 generic.go:334] "Generic (PLEG): container finished" podID="52d51718-aef8-4657-859f-aae6bb36d879" containerID="d666875f70cf7775c4b85843ee5c2fdf7258074a001edc203eb08db19a03eefe" exitCode=0 Jan 22 16:07:39 crc kubenswrapper[5032]: I0122 16:07:39.436683 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" event={"ID":"52d51718-aef8-4657-859f-aae6bb36d879","Type":"ContainerDied","Data":"d666875f70cf7775c4b85843ee5c2fdf7258074a001edc203eb08db19a03eefe"} Jan 22 16:07:39 crc kubenswrapper[5032]: I0122 16:07:39.440023 5032 generic.go:334] "Generic (PLEG): container finished" podID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerID="03bd51209aaf84097f883217b2475d582e0a9e3e1fbf0df8e481c1a43298d51b" exitCode=0 Jan 22 16:07:39 crc kubenswrapper[5032]: I0122 16:07:39.440075 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" event={"ID":"c01acdfa-c0a1-46bb-a57e-e616540daeaa","Type":"ContainerDied","Data":"03bd51209aaf84097f883217b2475d582e0a9e3e1fbf0df8e481c1a43298d51b"} Jan 22 16:07:43 crc kubenswrapper[5032]: I0122 16:07:43.925060 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:07:45 crc kubenswrapper[5032]: I0122 16:07:45.138290 5032 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2hzc8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Jan 22 16:07:45 crc kubenswrapper[5032]: I0122 16:07:45.138415 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Jan 22 16:07:45 crc kubenswrapper[5032]: I0122 16:07:45.630116 5032 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-k9qb2 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": context deadline exceeded" start-of-body= Jan 22 16:07:45 crc kubenswrapper[5032]: I0122 16:07:45.631079 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" 
podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": context deadline exceeded" Jan 22 16:07:55 crc kubenswrapper[5032]: I0122 16:07:55.137809 5032 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2hzc8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Jan 22 16:07:55 crc kubenswrapper[5032]: I0122 16:07:55.138274 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Jan 22 16:07:55 crc kubenswrapper[5032]: I0122 16:07:55.500120 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fmph6" Jan 22 16:07:55 crc kubenswrapper[5032]: I0122 16:07:55.628631 5032 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-k9qb2 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 16:07:55 crc kubenswrapper[5032]: I0122 16:07:55.629098 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.935704 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.967640 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:07:56 crc kubenswrapper[5032]: E0122 16:07:56.967896 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.967908 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" Jan 22 16:07:56 crc kubenswrapper[5032]: E0122 16:07:56.967920 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e14d069-8a95-4e40-b130-f0c65506b423" containerName="collect-profiles" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.967926 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e14d069-8a95-4e40-b130-f0c65506b423" containerName="collect-profiles" Jan 22 16:07:56 crc kubenswrapper[5032]: E0122 16:07:56.967933 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e6e215e-2303-4663-925e-796ee811e341" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.967938 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e6e215e-2303-4663-925e-796ee811e341" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: E0122 16:07:56.967948 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6386a09e-2963-4aec-b919-a1d387cdf851" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.967954 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6386a09e-2963-4aec-b919-a1d387cdf851" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.968039 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e6e215e-2303-4663-925e-796ee811e341" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.968048 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d51718-aef8-4657-859f-aae6bb36d879" containerName="route-controller-manager" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.968059 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e14d069-8a95-4e40-b130-f0c65506b423" containerName="collect-profiles" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.968068 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6386a09e-2963-4aec-b919-a1d387cdf851" containerName="pruner" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.968454 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:56 crc kubenswrapper[5032]: I0122 16:07:56.988588 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.121969 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config\") pod \"52d51718-aef8-4657-859f-aae6bb36d879\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122338 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66ndm\" (UniqueName: \"kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm\") pod \"52d51718-aef8-4657-859f-aae6bb36d879\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122384 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert\") pod \"52d51718-aef8-4657-859f-aae6bb36d879\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122485 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca\") pod \"52d51718-aef8-4657-859f-aae6bb36d879\" (UID: \"52d51718-aef8-4657-859f-aae6bb36d879\") " Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwhzv\" (UniqueName: \"kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122690 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122729 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.122767 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc 
kubenswrapper[5032]: I0122 16:07:57.123495 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca" (OuterVolumeSpecName: "client-ca") pod "52d51718-aef8-4657-859f-aae6bb36d879" (UID: "52d51718-aef8-4657-859f-aae6bb36d879"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.124000 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config" (OuterVolumeSpecName: "config") pod "52d51718-aef8-4657-859f-aae6bb36d879" (UID: "52d51718-aef8-4657-859f-aae6bb36d879"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.129064 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "52d51718-aef8-4657-859f-aae6bb36d879" (UID: "52d51718-aef8-4657-859f-aae6bb36d879"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.130011 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm" (OuterVolumeSpecName: "kube-api-access-66ndm") pod "52d51718-aef8-4657-859f-aae6bb36d879" (UID: "52d51718-aef8-4657-859f-aae6bb36d879"). InnerVolumeSpecName "kube-api-access-66ndm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.223679 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.223839 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.223962 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.224046 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwhzv\" (UniqueName: \"kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.224118 5032 reconciler_common.go:293] 
"Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.224144 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66ndm\" (UniqueName: \"kubernetes.io/projected/52d51718-aef8-4657-859f-aae6bb36d879-kube-api-access-66ndm\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.224165 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d51718-aef8-4657-859f-aae6bb36d879-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.224184 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/52d51718-aef8-4657-859f-aae6bb36d879-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.227150 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.227685 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.231692 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.245322 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwhzv\" (UniqueName: \"kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv\") pod \"route-controller-manager-5c554bd657-t2qdf\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.291936 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.617065 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" event={"ID":"52d51718-aef8-4657-859f-aae6bb36d879","Type":"ContainerDied","Data":"d9609d00d0ba1a2230756727daf3632e2a93d7827316c19ac0b12ae8bfb08500"} Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.617099 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.617210 5032 scope.go:117] "RemoveContainer" containerID="d666875f70cf7775c4b85843ee5c2fdf7258074a001edc203eb08db19a03eefe" Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.645991 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.648379 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9qb2"] Jan 22 16:07:57 crc kubenswrapper[5032]: I0122 16:07:57.899519 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:07:58 crc kubenswrapper[5032]: I0122 16:07:58.460997 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d51718-aef8-4657-859f-aae6bb36d879" path="/var/lib/kubelet/pods/52d51718-aef8-4657-859f-aae6bb36d879/volumes" Jan 22 16:08:00 crc kubenswrapper[5032]: E0122 16:08:00.217003 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 22 16:08:00 crc kubenswrapper[5032]: E0122 16:08:00.217223 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k7k6t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-g7g9c_openshift-marketplace(a130b809-3234-4d66-a8b8-1fcd5651244b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob 
sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\": context canceled" logger="UnhandledError" Jan 22 16:08:00 crc kubenswrapper[5032]: E0122 16:08:00.218353 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-g7g9c" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.264380 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.304987 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:00 crc kubenswrapper[5032]: E0122 16:08:00.305393 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.305408 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.305518 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" containerName="controller-manager" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.306009 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.310205 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465268 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles\") pod \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465333 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szsns\" (UniqueName: \"kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns\") pod \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465366 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca\") pod \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465387 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config\") pod \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465414 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert\") pod \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\" (UID: \"c01acdfa-c0a1-46bb-a57e-e616540daeaa\") " Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465562 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465599 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465634 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465662 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp8j7\" (UniqueName: 
\"kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.465688 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.466627 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca" (OuterVolumeSpecName: "client-ca") pod "c01acdfa-c0a1-46bb-a57e-e616540daeaa" (UID: "c01acdfa-c0a1-46bb-a57e-e616540daeaa"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.466718 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config" (OuterVolumeSpecName: "config") pod "c01acdfa-c0a1-46bb-a57e-e616540daeaa" (UID: "c01acdfa-c0a1-46bb-a57e-e616540daeaa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.466742 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c01acdfa-c0a1-46bb-a57e-e616540daeaa" (UID: "c01acdfa-c0a1-46bb-a57e-e616540daeaa"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.472082 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c01acdfa-c0a1-46bb-a57e-e616540daeaa" (UID: "c01acdfa-c0a1-46bb-a57e-e616540daeaa"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.477829 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns" (OuterVolumeSpecName: "kube-api-access-szsns") pod "c01acdfa-c0a1-46bb-a57e-e616540daeaa" (UID: "c01acdfa-c0a1-46bb-a57e-e616540daeaa"). InnerVolumeSpecName "kube-api-access-szsns". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.567381 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.567471 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp8j7\" (UniqueName: \"kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.567514 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.567541 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.567585 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.568401 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szsns\" (UniqueName: \"kubernetes.io/projected/c01acdfa-c0a1-46bb-a57e-e616540daeaa-kube-api-access-szsns\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.568801 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.568845 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.568855 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c01acdfa-c0a1-46bb-a57e-e616540daeaa-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.568879 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c01acdfa-c0a1-46bb-a57e-e616540daeaa-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.570496 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.571625 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.572258 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.573696 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.583537 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp8j7\" (UniqueName: \"kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7\") pod \"controller-manager-6bdf58b696-xp46g\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.628068 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.634638 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" event={"ID":"c01acdfa-c0a1-46bb-a57e-e616540daeaa","Type":"ContainerDied","Data":"41e787b8fb078563ec06d809f1251d90e8580da5ef63515ce30fdd15b0a654c8"} Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.634676 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hzc8" Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.671148 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:08:00 crc kubenswrapper[5032]: I0122 16:08:00.674493 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hzc8"] Jan 22 16:08:01 crc kubenswrapper[5032]: E0122 16:08:01.636344 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-g7g9c" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" Jan 22 16:08:01 crc kubenswrapper[5032]: E0122 16:08:01.729723 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 22 16:08:01 crc kubenswrapper[5032]: E0122 16:08:01.729913 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-njpkr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-h6xjw_openshift-marketplace(934b652c-ef02-4e1b-8043-1d26ea9d2c56): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:01 crc kubenswrapper[5032]: E0122 16:08:01.732285 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-h6xjw" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.175205 5032 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.176055 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.181238 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.181851 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.202650 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.202732 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.217161 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.304080 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.304133 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.304258 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.332378 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.473650 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c01acdfa-c0a1-46bb-a57e-e616540daeaa" path="/var/lib/kubelet/pods/c01acdfa-c0a1-46bb-a57e-e616540daeaa/volumes" Jan 22 16:08:02 crc kubenswrapper[5032]: I0122 16:08:02.510340 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:03 crc kubenswrapper[5032]: I0122 16:08:03.016326 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:08:03 crc kubenswrapper[5032]: I0122 16:08:03.016876 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:08:04 crc kubenswrapper[5032]: I0122 16:08:04.634477 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 16:08:05 crc kubenswrapper[5032]: E0122 16:08:05.421506 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-h6xjw" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" Jan 22 16:08:05 crc kubenswrapper[5032]: E0122 16:08:05.490644 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 22 16:08:05 crc kubenswrapper[5032]: E0122 16:08:05.490831 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rlb6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wlqfh_openshift-marketplace(d11bfc17-4e06-403a-8632-87b168d498cf): ErrImagePull: rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:05 crc kubenswrapper[5032]: E0122 16:08:05.492611 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wlqfh" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.385055 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.386112 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.391190 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.460363 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.460466 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.460493 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.562644 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.562786 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.562723 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.563331 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock\") pod \"installer-9-crc\" 
(UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.563426 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.584727 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access\") pod \"installer-9-crc\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:06 crc kubenswrapper[5032]: I0122 16:08:06.746292 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.068269 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.069993 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lhfpt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-fx8tt_openshift-marketplace(fe241ed5-4a42-4178-8756-513449190b6b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.071572 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/community-operators-fx8tt" podUID="fe241ed5-4a42-4178-8756-513449190b6b" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.082755 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.082881 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbhd6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-q67l2_openshift-marketplace(833d74e1-afb3-429e-93c1-75904ea02347): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:07 crc kubenswrapper[5032]: E0122 16:08:07.084200 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-q67l2" podUID="833d74e1-afb3-429e-93c1-75904ea02347" Jan 22 16:08:10 crc kubenswrapper[5032]: E0122 16:08:10.802488 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-fx8tt" podUID="fe241ed5-4a42-4178-8756-513449190b6b" Jan 22 16:08:10 crc kubenswrapper[5032]: E0122 16:08:10.802511 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-q67l2" podUID="833d74e1-afb3-429e-93c1-75904ea02347" Jan 22 16:08:10 crc kubenswrapper[5032]: 
I0122 16:08:10.810338 5032 scope.go:117] "RemoveContainer" containerID="03bd51209aaf84097f883217b2475d582e0a9e3e1fbf0df8e481c1a43298d51b" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.051733 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-95g6b"] Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.237707 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.340759 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.344579 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:08:11 crc kubenswrapper[5032]: W0122 16:08:11.352476 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod308679bd_4ba6_4266_b7af_af98812c290f.slice/crio-37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb WatchSource:0}: Error finding container 37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb: Status 404 returned error can't find the container with id 37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb Jan 22 16:08:11 crc kubenswrapper[5032]: W0122 16:08:11.354585 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dff015b_3b90_4ec8_8f1f_d6bd69b61271.slice/crio-bbd1d75fd5cb1e6908d8a31769539ebfe86c2e70107a31d8934906711645a5f1 WatchSource:0}: Error finding container bbd1d75fd5cb1e6908d8a31769539ebfe86c2e70107a31d8934906711645a5f1: Status 404 returned error can't find the container with id bbd1d75fd5cb1e6908d8a31769539ebfe86c2e70107a31d8934906711645a5f1 Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.388641 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.388790 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g6m26,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-zj287_openshift-marketplace(2a92ee08-99e1-4a48-80e5-5158039c8f75): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.390215 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-zj287" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.401052 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:11 crc kubenswrapper[5032]: W0122 16:08:11.419999 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90035fd6_32dd_45eb_9f94_2a113874fe82.slice/crio-1f371a3f5c974268b572dad2e813fa0ff9a031d3fb36645d173cf4d41e02c319 WatchSource:0}: Error finding container 1f371a3f5c974268b572dad2e813fa0ff9a031d3fb36645d173cf4d41e02c319: Status 404 returned error can't find the container with id 1f371a3f5c974268b572dad2e813fa0ff9a031d3fb36645d173cf4d41e02c319 Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.425131 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.425299 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fjncg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-9p297_openshift-marketplace(9d0a99e7-360b-4ce1-bcc8-23aa20685880): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.427332 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-9p297" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.456705 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.457215 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2qlnc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-5f9qk_openshift-marketplace(d2abfe75-3d69-4043-add3-91011758e245): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.458534 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-5f9qk" podUID="d2abfe75-3d69-4043-add3-91011758e245" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.719600 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerName="route-controller-manager" containerID="cri-o://240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e" gracePeriod=30 Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.719234 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" event={"ID":"3dff015b-3b90-4ec8-8f1f-d6bd69b61271","Type":"ContainerStarted","Data":"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.722145 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.722226 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" event={"ID":"3dff015b-3b90-4ec8-8f1f-d6bd69b61271","Type":"ContainerStarted","Data":"bbd1d75fd5cb1e6908d8a31769539ebfe86c2e70107a31d8934906711645a5f1"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.726836 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"308679bd-4ba6-4266-b7af-af98812c290f","Type":"ContainerStarted","Data":"37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.742960 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" podStartSLOduration=34.742938762 podStartE2EDuration="34.742938762s" podCreationTimestamp="2026-01-22 16:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:11.742904441 +0000 UTC m=+195.555073472" watchObservedRunningTime="2026-01-22 16:08:11.742938762 +0000 UTC m=+195.555107783" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.743084 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" event={"ID":"90035fd6-32dd-45eb-9f94-2a113874fe82","Type":"ContainerStarted","Data":"c6687a03490ba3b1bb5992f90a64b09bbda947174a99fda6407e0a63e76b766f"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.743144 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" event={"ID":"90035fd6-32dd-45eb-9f94-2a113874fe82","Type":"ContainerStarted","Data":"1f371a3f5c974268b572dad2e813fa0ff9a031d3fb36645d173cf4d41e02c319"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.743888 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.757252 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"c77cf4f1-3f82-4a14-9dda-22ba6fe89145","Type":"ContainerStarted","Data":"1e70ffe5d822b319ee5aaa6e12c810032445378df17c4ec88e595546e59fffbc"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.757717 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"c77cf4f1-3f82-4a14-9dda-22ba6fe89145","Type":"ContainerStarted","Data":"b2f6e7d4af75ed70c6897d3310d2509ec0a7e9bc7eb7ba4d83b757efbdff70f2"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.761068 5032 patch_prober.go:28] interesting pod/route-controller-manager-5c554bd657-t2qdf container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.54:8443/healthz\": read tcp 10.217.0.2:49544->10.217.0.54:8443: read: connection reset by peer" start-of-body= Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.761138 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.54:8443/healthz\": read tcp 10.217.0.2:49544->10.217.0.54:8443: read: connection reset by peer" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.761227 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.763765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-95g6b" 
event={"ID":"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2","Type":"ContainerStarted","Data":"33ca8158559fc56210d5a4f04470163cac2ac0b288c5d80d08292d2f7eacd8b7"} Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.763813 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-95g6b" event={"ID":"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2","Type":"ContainerStarted","Data":"881dff91c7f264b67663fcf9d5fa496e30510a5d46c3f77670f93e5bb6a8c722"} Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.769041 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-9p297" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.769128 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-zj287" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" Jan 22 16:08:11 crc kubenswrapper[5032]: E0122 16:08:11.769180 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-5f9qk" podUID="d2abfe75-3d69-4043-add3-91011758e245" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.790835 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" podStartSLOduration=14.79081266 podStartE2EDuration="14.79081266s" podCreationTimestamp="2026-01-22 16:07:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:11.788505374 +0000 UTC m=+195.600674405" watchObservedRunningTime="2026-01-22 16:08:11.79081266 +0000 UTC m=+195.602981691" Jan 22 16:08:11 crc kubenswrapper[5032]: I0122 16:08:11.944366 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=9.944345997 podStartE2EDuration="9.944345997s" podCreationTimestamp="2026-01-22 16:08:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:11.943030804 +0000 UTC m=+195.755199835" watchObservedRunningTime="2026-01-22 16:08:11.944345997 +0000 UTC m=+195.756515028" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.120932 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-5c554bd657-t2qdf_3dff015b-3b90-4ec8-8f1f-d6bd69b61271/route-controller-manager/0.log" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.121021 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.152078 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:12 crc kubenswrapper[5032]: E0122 16:08:12.152353 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerName="route-controller-manager" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.152368 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerName="route-controller-manager" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.152505 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerName="route-controller-manager" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.153029 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.161623 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.242964 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca\") pod \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.243030 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwhzv\" (UniqueName: \"kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv\") pod \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.243057 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config\") pod \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.243365 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert\") pod \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\" (UID: \"3dff015b-3b90-4ec8-8f1f-d6bd69b61271\") " Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.243658 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244098 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: 
\"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.243930 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config" (OuterVolumeSpecName: "config") pod "3dff015b-3b90-4ec8-8f1f-d6bd69b61271" (UID: "3dff015b-3b90-4ec8-8f1f-d6bd69b61271"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244010 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca" (OuterVolumeSpecName: "client-ca") pod "3dff015b-3b90-4ec8-8f1f-d6bd69b61271" (UID: "3dff015b-3b90-4ec8-8f1f-d6bd69b61271"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244188 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244227 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jsf8\" (UniqueName: \"kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244329 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.244346 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.249061 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3dff015b-3b90-4ec8-8f1f-d6bd69b61271" (UID: "3dff015b-3b90-4ec8-8f1f-d6bd69b61271"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.249806 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv" (OuterVolumeSpecName: "kube-api-access-rwhzv") pod "3dff015b-3b90-4ec8-8f1f-d6bd69b61271" (UID: "3dff015b-3b90-4ec8-8f1f-d6bd69b61271"). InnerVolumeSpecName "kube-api-access-rwhzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345130 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345218 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345236 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345255 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jsf8\" (UniqueName: \"kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345300 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwhzv\" (UniqueName: \"kubernetes.io/projected/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-kube-api-access-rwhzv\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.345312 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dff015b-3b90-4ec8-8f1f-d6bd69b61271-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.346354 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.346403 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.349117 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " 
pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.361671 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jsf8\" (UniqueName: \"kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8\") pod \"route-controller-manager-67bcd55d44-pk4qd\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.504823 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.771621 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"308679bd-4ba6-4266-b7af-af98812c290f","Type":"ContainerStarted","Data":"8670b4bffc1f877c75ca4c7efd16ac59cb7c18058362407b96edd4c17d08869f"} Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.774822 5032 generic.go:334] "Generic (PLEG): container finished" podID="c77cf4f1-3f82-4a14-9dda-22ba6fe89145" containerID="1e70ffe5d822b319ee5aaa6e12c810032445378df17c4ec88e595546e59fffbc" exitCode=0 Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.774913 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"c77cf4f1-3f82-4a14-9dda-22ba6fe89145","Type":"ContainerDied","Data":"1e70ffe5d822b319ee5aaa6e12c810032445378df17c4ec88e595546e59fffbc"} Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.777742 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-95g6b" event={"ID":"8b3b6ab4-4b6e-4333-ad4f-d9a3798d0ed2","Type":"ContainerStarted","Data":"7b6f934f42e76ecdcf4a6d7b81e8da28888fd166b36a82b374cb7a7ef910bdd7"} Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779417 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-5c554bd657-t2qdf_3dff015b-3b90-4ec8-8f1f-d6bd69b61271/route-controller-manager/0.log" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779459 5032 generic.go:334] "Generic (PLEG): container finished" podID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" containerID="240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e" exitCode=255 Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779521 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779520 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" event={"ID":"3dff015b-3b90-4ec8-8f1f-d6bd69b61271","Type":"ContainerDied","Data":"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e"} Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779565 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf" event={"ID":"3dff015b-3b90-4ec8-8f1f-d6bd69b61271","Type":"ContainerDied","Data":"bbd1d75fd5cb1e6908d8a31769539ebfe86c2e70107a31d8934906711645a5f1"} Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.779587 5032 scope.go:117] "RemoveContainer" containerID="240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.787532 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=6.787490984 podStartE2EDuration="6.787490984s" podCreationTimestamp="2026-01-22 16:08:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:12.786480878 +0000 UTC m=+196.598649929" watchObservedRunningTime="2026-01-22 16:08:12.787490984 +0000 UTC m=+196.599660015" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.818564 5032 scope.go:117] "RemoveContainer" containerID="240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e" Jan 22 16:08:12 crc kubenswrapper[5032]: E0122 16:08:12.819195 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e\": container with ID starting with 240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e not found: ID does not exist" containerID="240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.819247 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e"} err="failed to get container status \"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e\": rpc error: code = NotFound desc = could not find container \"240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e\": container with ID starting with 240f086d6258bd081ab2067877d1b07a49164455c855f34b5bfa74c8a4e2a24e not found: ID does not exist" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.831401 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-95g6b" podStartSLOduration=176.83137712 podStartE2EDuration="2m56.83137712s" podCreationTimestamp="2026-01-22 16:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:12.810480669 +0000 UTC m=+196.622649710" watchObservedRunningTime="2026-01-22 16:08:12.83137712 +0000 UTC m=+196.643546151" Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.849961 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.854929 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c554bd657-t2qdf"] Jan 22 16:08:12 crc kubenswrapper[5032]: I0122 16:08:12.948311 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:12 crc kubenswrapper[5032]: W0122 16:08:12.953027 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13e7c5db_8678_49ac_8d52_290b60b99bbf.slice/crio-1329aa5a66c2b8644585561bea6c2126c68efe3e141848ff119c29b2e5601205 WatchSource:0}: Error finding container 1329aa5a66c2b8644585561bea6c2126c68efe3e141848ff119c29b2e5601205: Status 404 returned error can't find the container with id 1329aa5a66c2b8644585561bea6c2126c68efe3e141848ff119c29b2e5601205 Jan 22 16:08:13 crc kubenswrapper[5032]: I0122 16:08:13.788551 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" event={"ID":"13e7c5db-8678-49ac-8d52-290b60b99bbf","Type":"ContainerStarted","Data":"ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf"} Jan 22 16:08:13 crc kubenswrapper[5032]: I0122 16:08:13.789066 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" event={"ID":"13e7c5db-8678-49ac-8d52-290b60b99bbf","Type":"ContainerStarted","Data":"1329aa5a66c2b8644585561bea6c2126c68efe3e141848ff119c29b2e5601205"} Jan 22 16:08:13 crc kubenswrapper[5032]: I0122 16:08:13.814627 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" podStartSLOduration=16.814602376 podStartE2EDuration="16.814602376s" podCreationTimestamp="2026-01-22 16:07:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:13.811020153 +0000 UTC m=+197.623189194" watchObservedRunningTime="2026-01-22 16:08:13.814602376 +0000 UTC m=+197.626771417" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.063207 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.174954 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access\") pod \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.175077 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir\") pod \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\" (UID: \"c77cf4f1-3f82-4a14-9dda-22ba6fe89145\") " Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.175240 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c77cf4f1-3f82-4a14-9dda-22ba6fe89145" (UID: "c77cf4f1-3f82-4a14-9dda-22ba6fe89145"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.182017 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c77cf4f1-3f82-4a14-9dda-22ba6fe89145" (UID: "c77cf4f1-3f82-4a14-9dda-22ba6fe89145"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.276334 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.276377 5032 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c77cf4f1-3f82-4a14-9dda-22ba6fe89145-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.466383 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dff015b-3b90-4ec8-8f1f-d6bd69b61271" path="/var/lib/kubelet/pods/3dff015b-3b90-4ec8-8f1f-d6bd69b61271/volumes" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.795404 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"c77cf4f1-3f82-4a14-9dda-22ba6fe89145","Type":"ContainerDied","Data":"b2f6e7d4af75ed70c6897d3310d2509ec0a7e9bc7eb7ba4d83b757efbdff70f2"} Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.795469 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2f6e7d4af75ed70c6897d3310d2509ec0a7e9bc7eb7ba4d83b757efbdff70f2" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.795619 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.796048 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 16:08:14 crc kubenswrapper[5032]: I0122 16:08:14.802386 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:21 crc kubenswrapper[5032]: I0122 16:08:21.842704 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerStarted","Data":"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152"} Jan 22 16:08:21 crc kubenswrapper[5032]: I0122 16:08:21.844808 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerStarted","Data":"684b52166849a36ca909f7223bd253542253bfae9bf313a04bc2c5880ee1025c"} Jan 22 16:08:22 crc kubenswrapper[5032]: I0122 16:08:22.852393 5032 generic.go:334] "Generic (PLEG): container finished" podID="d11bfc17-4e06-403a-8632-87b168d498cf" containerID="3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152" exitCode=0 Jan 22 16:08:22 crc kubenswrapper[5032]: I0122 16:08:22.852488 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerDied","Data":"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152"} Jan 22 16:08:22 crc kubenswrapper[5032]: I0122 16:08:22.858003 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerStarted","Data":"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57"} Jan 22 16:08:22 crc kubenswrapper[5032]: I0122 16:08:22.862755 5032 generic.go:334] "Generic (PLEG): container finished" podID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerID="684b52166849a36ca909f7223bd253542253bfae9bf313a04bc2c5880ee1025c" exitCode=0 Jan 22 16:08:22 crc kubenswrapper[5032]: I0122 16:08:22.862819 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerDied","Data":"684b52166849a36ca909f7223bd253542253bfae9bf313a04bc2c5880ee1025c"} Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.833293 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-db6bl"] Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.874563 5032 generic.go:334] "Generic (PLEG): container finished" podID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerID="16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57" exitCode=0 Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.874655 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerDied","Data":"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57"} Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.879532 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerStarted","Data":"8809282e82ef2da7c3553650a0b4ee8bdeb949147b61f4ae75f2508d6d1612f5"} Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.883514 
5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerStarted","Data":"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2"} Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.925247 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wlqfh" podStartSLOduration=5.0315282 podStartE2EDuration="58.925214108s" podCreationTimestamp="2026-01-22 16:07:25 +0000 UTC" firstStartedPulling="2026-01-22 16:07:29.35946162 +0000 UTC m=+153.171630651" lastFinishedPulling="2026-01-22 16:08:23.253147528 +0000 UTC m=+207.065316559" observedRunningTime="2026-01-22 16:08:23.92337925 +0000 UTC m=+207.735548291" watchObservedRunningTime="2026-01-22 16:08:23.925214108 +0000 UTC m=+207.737383139" Jan 22 16:08:23 crc kubenswrapper[5032]: I0122 16:08:23.949926 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g7g9c" podStartSLOduration=5.034100046 podStartE2EDuration="57.949908827s" podCreationTimestamp="2026-01-22 16:07:26 +0000 UTC" firstStartedPulling="2026-01-22 16:07:30.371604325 +0000 UTC m=+154.183773356" lastFinishedPulling="2026-01-22 16:08:23.287413106 +0000 UTC m=+207.099582137" observedRunningTime="2026-01-22 16:08:23.948215653 +0000 UTC m=+207.760384684" watchObservedRunningTime="2026-01-22 16:08:23.949908827 +0000 UTC m=+207.762077858" Jan 22 16:08:24 crc kubenswrapper[5032]: I0122 16:08:24.943319 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerStarted","Data":"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0"} Jan 22 16:08:24 crc kubenswrapper[5032]: I0122 16:08:24.969445 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h6xjw" podStartSLOduration=2.727860229 podStartE2EDuration="1m0.969403393s" podCreationTimestamp="2026-01-22 16:07:24 +0000 UTC" firstStartedPulling="2026-01-22 16:07:26.151124007 +0000 UTC m=+149.963293038" lastFinishedPulling="2026-01-22 16:08:24.392667171 +0000 UTC m=+208.204836202" observedRunningTime="2026-01-22 16:08:24.966130718 +0000 UTC m=+208.778299749" watchObservedRunningTime="2026-01-22 16:08:24.969403393 +0000 UTC m=+208.781572434" Jan 22 16:08:25 crc kubenswrapper[5032]: I0122 16:08:25.251192 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:08:25 crc kubenswrapper[5032]: I0122 16:08:25.251294 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:08:25 crc kubenswrapper[5032]: I0122 16:08:25.948667 5032 generic.go:334] "Generic (PLEG): container finished" podID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerID="38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6" exitCode=0 Jan 22 16:08:25 crc kubenswrapper[5032]: I0122 16:08:25.949395 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerDied","Data":"38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6"} Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.273128 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.273469 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.327037 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-h6xjw" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="registry-server" probeResult="failure" output=< Jan 22 16:08:26 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:08:26 crc kubenswrapper[5032]: > Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.690615 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.691002 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:26 crc kubenswrapper[5032]: I0122 16:08:26.959405 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerStarted","Data":"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6"} Jan 22 16:08:27 crc kubenswrapper[5032]: I0122 16:08:27.324682 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wlqfh" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="registry-server" probeResult="failure" output=< Jan 22 16:08:27 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:08:27 crc kubenswrapper[5032]: > Jan 22 16:08:27 crc kubenswrapper[5032]: I0122 16:08:27.731225 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g7g9c" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="registry-server" probeResult="failure" output=< Jan 22 16:08:27 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:08:27 crc kubenswrapper[5032]: > Jan 22 16:08:27 crc kubenswrapper[5032]: I0122 16:08:27.970160 5032 generic.go:334] "Generic (PLEG): container finished" podID="fe241ed5-4a42-4178-8756-513449190b6b" containerID="9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6" exitCode=0 Jan 22 16:08:27 crc kubenswrapper[5032]: I0122 16:08:27.970231 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerDied","Data":"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6"} Jan 22 16:08:28 crc kubenswrapper[5032]: I0122 16:08:28.979503 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerStarted","Data":"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225"} Jan 22 16:08:28 crc kubenswrapper[5032]: I0122 16:08:28.981698 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerStarted","Data":"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec"} Jan 22 16:08:29 crc kubenswrapper[5032]: I0122 16:08:29.025600 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-zj287" podStartSLOduration=4.142399492 podStartE2EDuration="1m7.02556811s" podCreationTimestamp="2026-01-22 16:07:22 +0000 UTC" firstStartedPulling="2026-01-22 16:07:25.090545959 +0000 UTC m=+148.902714990" lastFinishedPulling="2026-01-22 16:08:27.973714567 +0000 UTC m=+211.785883608" observedRunningTime="2026-01-22 16:08:28.999713731 +0000 UTC m=+212.811882762" watchObservedRunningTime="2026-01-22 16:08:29.02556811 +0000 UTC m=+212.837737141" Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.002344 5032 generic.go:334] "Generic (PLEG): container finished" podID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerID="ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec" exitCode=0 Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.002446 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerDied","Data":"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec"} Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.007276 5032 generic.go:334] "Generic (PLEG): container finished" podID="d2abfe75-3d69-4043-add3-91011758e245" containerID="c641a494c23dbbd7aa3f4884cabc05a42fb5ec20bc773e57469e567ad60dbb9b" exitCode=0 Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.007717 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerDied","Data":"c641a494c23dbbd7aa3f4884cabc05a42fb5ec20bc773e57469e567ad60dbb9b"} Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.014545 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerStarted","Data":"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb"} Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.017421 5032 generic.go:334] "Generic (PLEG): container finished" podID="833d74e1-afb3-429e-93c1-75904ea02347" containerID="750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600" exitCode=0 Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.017477 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerDied","Data":"750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600"} Jan 22 16:08:30 crc kubenswrapper[5032]: I0122 16:08:30.083776 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fx8tt" podStartSLOduration=2.952746109 podStartE2EDuration="1m8.083754228s" podCreationTimestamp="2026-01-22 16:07:22 +0000 UTC" firstStartedPulling="2026-01-22 16:07:23.938681474 +0000 UTC m=+147.750850505" lastFinishedPulling="2026-01-22 16:08:29.069689593 +0000 UTC m=+212.881858624" observedRunningTime="2026-01-22 16:08:30.081831658 +0000 UTC m=+213.894000729" watchObservedRunningTime="2026-01-22 16:08:30.083754228 +0000 UTC m=+213.895923259" Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 16:08:31.025751 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerStarted","Data":"90f5550e41a6da2d69b0d684cc08ca2de909e7ab207d2ed0bee2c74751da6838"} Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 
16:08:31.027831 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerStarted","Data":"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e"} Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 16:08:31.029578 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerStarted","Data":"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e"} Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 16:08:31.048326 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5f9qk" podStartSLOduration=2.656021788 podStartE2EDuration="1m8.04830763s" podCreationTimestamp="2026-01-22 16:07:23 +0000 UTC" firstStartedPulling="2026-01-22 16:07:25.004060079 +0000 UTC m=+148.816229110" lastFinishedPulling="2026-01-22 16:08:30.396345871 +0000 UTC m=+214.208514952" observedRunningTime="2026-01-22 16:08:31.047194711 +0000 UTC m=+214.859363762" watchObservedRunningTime="2026-01-22 16:08:31.04830763 +0000 UTC m=+214.860476661" Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 16:08:31.069148 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9p297" podStartSLOduration=2.693048361 podStartE2EDuration="1m8.069128629s" podCreationTimestamp="2026-01-22 16:07:23 +0000 UTC" firstStartedPulling="2026-01-22 16:07:25.068235395 +0000 UTC m=+148.880404426" lastFinishedPulling="2026-01-22 16:08:30.444315643 +0000 UTC m=+214.256484694" observedRunningTime="2026-01-22 16:08:31.064685024 +0000 UTC m=+214.876854065" watchObservedRunningTime="2026-01-22 16:08:31.069128629 +0000 UTC m=+214.881297660" Jan 22 16:08:31 crc kubenswrapper[5032]: I0122 16:08:31.085207 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q67l2" podStartSLOduration=3.990543611 podStartE2EDuration="1m6.085186535s" podCreationTimestamp="2026-01-22 16:07:25 +0000 UTC" firstStartedPulling="2026-01-22 16:07:28.33078032 +0000 UTC m=+152.142949351" lastFinishedPulling="2026-01-22 16:08:30.425423204 +0000 UTC m=+214.237592275" observedRunningTime="2026-01-22 16:08:31.082771132 +0000 UTC m=+214.894940173" watchObservedRunningTime="2026-01-22 16:08:31.085186535 +0000 UTC m=+214.897355576" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.017209 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.017648 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.017703 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.018346 5032 kuberuntime_manager.go:1027] 
"Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.018422 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653" gracePeriod=600 Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.262095 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.262206 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.638844 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:33 crc kubenswrapper[5032]: I0122 16:08:33.639274 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.131552 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.131613 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.257356 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.260181 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.260740 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.329273 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.376531 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.376672 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:34 crc kubenswrapper[5032]: I0122 16:08:34.421695 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.056403 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653" exitCode=0 Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.056611 5032 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653"} Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.109967 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.113504 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.119283 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.296904 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.351187 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.757329 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.758459 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:35 crc kubenswrapper[5032]: I0122 16:08:35.815622 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:36 crc kubenswrapper[5032]: I0122 16:08:36.134796 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:36 crc kubenswrapper[5032]: I0122 16:08:36.339312 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:08:36 crc kubenswrapper[5032]: I0122 16:08:36.410435 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:08:36 crc kubenswrapper[5032]: I0122 16:08:36.795381 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:36 crc kubenswrapper[5032]: I0122 16:08:36.868818 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.090399 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.090793 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9p297" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="registry-server" containerID="cri-o://dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e" gracePeriod=2 Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.292140 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.292530 5032 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/certified-operators-5f9qk" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="registry-server" containerID="cri-o://90f5550e41a6da2d69b0d684cc08ca2de909e7ab207d2ed0bee2c74751da6838" gracePeriod=2 Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.848234 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.848902 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" podUID="90035fd6-32dd-45eb-9f94-2a113874fe82" containerName="controller-manager" containerID="cri-o://c6687a03490ba3b1bb5992f90a64b09bbda947174a99fda6407e0a63e76b766f" gracePeriod=30 Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.937037 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:37 crc kubenswrapper[5032]: I0122 16:08:37.937344 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" podUID="13e7c5db-8678-49ac-8d52-290b60b99bbf" containerName="route-controller-manager" containerID="cri-o://ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf" gracePeriod=30 Jan 22 16:08:38 crc kubenswrapper[5032]: I0122 16:08:38.077408 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89"} Jan 22 16:08:38 crc kubenswrapper[5032]: E0122 16:08:38.961222 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13e7c5db_8678_49ac_8d52_290b60b99bbf.slice/crio-conmon-ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.087447 5032 generic.go:334] "Generic (PLEG): container finished" podID="13e7c5db-8678-49ac-8d52-290b60b99bbf" containerID="ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf" exitCode=0 Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.087560 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" event={"ID":"13e7c5db-8678-49ac-8d52-290b60b99bbf","Type":"ContainerDied","Data":"ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf"} Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.089934 5032 generic.go:334] "Generic (PLEG): container finished" podID="90035fd6-32dd-45eb-9f94-2a113874fe82" containerID="c6687a03490ba3b1bb5992f90a64b09bbda947174a99fda6407e0a63e76b766f" exitCode=0 Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.090062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" event={"ID":"90035fd6-32dd-45eb-9f94-2a113874fe82","Type":"ContainerDied","Data":"c6687a03490ba3b1bb5992f90a64b09bbda947174a99fda6407e0a63e76b766f"} Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.490852 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.491247 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q67l2" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="registry-server" containerID="cri-o://c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e" gracePeriod=2 Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.692263 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.693252 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g7g9c" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="registry-server" containerID="cri-o://8809282e82ef2da7c3553650a0b4ee8bdeb949147b61f4ae75f2508d6d1612f5" gracePeriod=2 Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.900357 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.958300 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:08:39 crc kubenswrapper[5032]: E0122 16:08:39.958545 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13e7c5db-8678-49ac-8d52-290b60b99bbf" containerName="route-controller-manager" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.958557 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="13e7c5db-8678-49ac-8d52-290b60b99bbf" containerName="route-controller-manager" Jan 22 16:08:39 crc kubenswrapper[5032]: E0122 16:08:39.958573 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77cf4f1-3f82-4a14-9dda-22ba6fe89145" containerName="pruner" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.958579 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77cf4f1-3f82-4a14-9dda-22ba6fe89145" containerName="pruner" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.962138 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="13e7c5db-8678-49ac-8d52-290b60b99bbf" containerName="route-controller-manager" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.962153 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c77cf4f1-3f82-4a14-9dda-22ba6fe89145" containerName="pruner" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.962561 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.982370 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:08:39 crc kubenswrapper[5032]: I0122 16:08:39.987464 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.027541 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.033226 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjncg\" (UniqueName: \"kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg\") pod \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.033483 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jsf8\" (UniqueName: \"kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8\") pod \"13e7c5db-8678-49ac-8d52-290b60b99bbf\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.033581 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert\") pod \"13e7c5db-8678-49ac-8d52-290b60b99bbf\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.033704 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca\") pod \"13e7c5db-8678-49ac-8d52-290b60b99bbf\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.033874 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config\") pod \"13e7c5db-8678-49ac-8d52-290b60b99bbf\" (UID: \"13e7c5db-8678-49ac-8d52-290b60b99bbf\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.034189 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.034289 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.034383 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.034478 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv4cr\" (UniqueName: \"kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: 
\"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.035012 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca" (OuterVolumeSpecName: "client-ca") pod "13e7c5db-8678-49ac-8d52-290b60b99bbf" (UID: "13e7c5db-8678-49ac-8d52-290b60b99bbf"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.035362 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config" (OuterVolumeSpecName: "config") pod "13e7c5db-8678-49ac-8d52-290b60b99bbf" (UID: "13e7c5db-8678-49ac-8d52-290b60b99bbf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.042772 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8" (OuterVolumeSpecName: "kube-api-access-2jsf8") pod "13e7c5db-8678-49ac-8d52-290b60b99bbf" (UID: "13e7c5db-8678-49ac-8d52-290b60b99bbf"). InnerVolumeSpecName "kube-api-access-2jsf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.045752 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg" (OuterVolumeSpecName: "kube-api-access-fjncg") pod "9d0a99e7-360b-4ce1-bcc8-23aa20685880" (UID: "9d0a99e7-360b-4ce1-bcc8-23aa20685880"). InnerVolumeSpecName "kube-api-access-fjncg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.047466 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "13e7c5db-8678-49ac-8d52-290b60b99bbf" (UID: "13e7c5db-8678-49ac-8d52-290b60b99bbf"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.065669 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.102759 5032 generic.go:334] "Generic (PLEG): container finished" podID="833d74e1-afb3-429e-93c1-75904ea02347" containerID="c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e" exitCode=0 Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.102839 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerDied","Data":"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.102902 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q67l2" event={"ID":"833d74e1-afb3-429e-93c1-75904ea02347","Type":"ContainerDied","Data":"1927ed7df28762c8bd26578a9070284d73c465aaa0032f1dc867939ed61e140c"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.102928 5032 scope.go:117] "RemoveContainer" containerID="c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.103071 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q67l2" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.110513 5032 generic.go:334] "Generic (PLEG): container finished" podID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerID="dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e" exitCode=0 Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.110603 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerDied","Data":"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.110641 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p297" event={"ID":"9d0a99e7-360b-4ce1-bcc8-23aa20685880","Type":"ContainerDied","Data":"911edb85eb15ddd9256df5256762a766f5f46dcf3090e278093b538a0a2b175b"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.110732 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p297" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.121675 5032 scope.go:117] "RemoveContainer" containerID="750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.127184 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.127214 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bdf58b696-xp46g" event={"ID":"90035fd6-32dd-45eb-9f94-2a113874fe82","Type":"ContainerDied","Data":"1f371a3f5c974268b572dad2e813fa0ff9a031d3fb36645d173cf4d41e02c319"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135265 5032 generic.go:334] "Generic (PLEG): container finished" podID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerID="8809282e82ef2da7c3553650a0b4ee8bdeb949147b61f4ae75f2508d6d1612f5" exitCode=0 Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135411 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerDied","Data":"8809282e82ef2da7c3553650a0b4ee8bdeb949147b61f4ae75f2508d6d1612f5"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135293 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles\") pod \"90035fd6-32dd-45eb-9f94-2a113874fe82\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135565 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert\") pod \"90035fd6-32dd-45eb-9f94-2a113874fe82\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135598 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities\") pod \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135640 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities\") pod \"833d74e1-afb3-429e-93c1-75904ea02347\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135726 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content\") pod \"833d74e1-afb3-429e-93c1-75904ea02347\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135784 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config\") pod \"90035fd6-32dd-45eb-9f94-2a113874fe82\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.135840 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp8j7\" (UniqueName: \"kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7\") pod \"90035fd6-32dd-45eb-9f94-2a113874fe82\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136060 5032 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca\") pod \"90035fd6-32dd-45eb-9f94-2a113874fe82\" (UID: \"90035fd6-32dd-45eb-9f94-2a113874fe82\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136091 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbhd6\" (UniqueName: \"kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6\") pod \"833d74e1-afb3-429e-93c1-75904ea02347\" (UID: \"833d74e1-afb3-429e-93c1-75904ea02347\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136134 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content\") pod \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\" (UID: \"9d0a99e7-360b-4ce1-bcc8-23aa20685880\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136494 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136574 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136607 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "90035fd6-32dd-45eb-9f94-2a113874fe82" (UID: "90035fd6-32dd-45eb-9f94-2a113874fe82"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136710 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136841 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv4cr\" (UniqueName: \"kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136961 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136976 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13e7c5db-8678-49ac-8d52-290b60b99bbf-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136986 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjncg\" (UniqueName: \"kubernetes.io/projected/9d0a99e7-360b-4ce1-bcc8-23aa20685880-kube-api-access-fjncg\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.136998 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jsf8\" (UniqueName: \"kubernetes.io/projected/13e7c5db-8678-49ac-8d52-290b60b99bbf-kube-api-access-2jsf8\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.137008 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13e7c5db-8678-49ac-8d52-290b60b99bbf-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.137018 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.137061 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities" (OuterVolumeSpecName: "utilities") pod "9d0a99e7-360b-4ce1-bcc8-23aa20685880" (UID: "9d0a99e7-360b-4ce1-bcc8-23aa20685880"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.138216 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config" (OuterVolumeSpecName: "config") pod "90035fd6-32dd-45eb-9f94-2a113874fe82" (UID: "90035fd6-32dd-45eb-9f94-2a113874fe82"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.138529 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.140154 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" event={"ID":"13e7c5db-8678-49ac-8d52-290b60b99bbf","Type":"ContainerDied","Data":"1329aa5a66c2b8644585561bea6c2126c68efe3e141848ff119c29b2e5601205"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.140950 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.141595 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "90035fd6-32dd-45eb-9f94-2a113874fe82" (UID: "90035fd6-32dd-45eb-9f94-2a113874fe82"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.143223 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7" (OuterVolumeSpecName: "kube-api-access-tp8j7") pod "90035fd6-32dd-45eb-9f94-2a113874fe82" (UID: "90035fd6-32dd-45eb-9f94-2a113874fe82"). InnerVolumeSpecName "kube-api-access-tp8j7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.143372 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.143389 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6" (OuterVolumeSpecName: "kube-api-access-kbhd6") pod "833d74e1-afb3-429e-93c1-75904ea02347" (UID: "833d74e1-afb3-429e-93c1-75904ea02347"). InnerVolumeSpecName "kube-api-access-kbhd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.144057 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca" (OuterVolumeSpecName: "client-ca") pod "90035fd6-32dd-45eb-9f94-2a113874fe82" (UID: "90035fd6-32dd-45eb-9f94-2a113874fe82"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.150036 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities" (OuterVolumeSpecName: "utilities") pod "833d74e1-afb3-429e-93c1-75904ea02347" (UID: "833d74e1-afb3-429e-93c1-75904ea02347"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.156390 5032 scope.go:117] "RemoveContainer" containerID="8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.157309 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.161034 5032 generic.go:334] "Generic (PLEG): container finished" podID="d2abfe75-3d69-4043-add3-91011758e245" containerID="90f5550e41a6da2d69b0d684cc08ca2de909e7ab207d2ed0bee2c74751da6838" exitCode=0 Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.161162 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerDied","Data":"90f5550e41a6da2d69b0d684cc08ca2de909e7ab207d2ed0bee2c74751da6838"} Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.161241 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv4cr\" (UniqueName: \"kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr\") pod \"route-controller-manager-d598545f4-5q6kt\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.172378 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.180208 5032 scope.go:117] "RemoveContainer" containerID="c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e" Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.180766 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e\": container with ID starting with c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e not found: ID does not exist" containerID="c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.180881 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e"} err="failed to get container status \"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e\": rpc error: code = NotFound desc = could not find container \"c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e\": container with ID starting with c5abe50932bf6e36cf75e25326934f717a86a35ea7cd44070366c081188b5a4e not found: ID does not exist" Jan 22 
16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.181047 5032 scope.go:117] "RemoveContainer" containerID="750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.182728 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67bcd55d44-pk4qd"] Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.183830 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600\": container with ID starting with 750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600 not found: ID does not exist" containerID="750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.183880 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600"} err="failed to get container status \"750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600\": rpc error: code = NotFound desc = could not find container \"750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600\": container with ID starting with 750af65b49ee540b81fea7d93b2aa522a3902cc1d1124339d04ee550df187600 not found: ID does not exist" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.183909 5032 scope.go:117] "RemoveContainer" containerID="8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.190041 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "833d74e1-afb3-429e-93c1-75904ea02347" (UID: "833d74e1-afb3-429e-93c1-75904ea02347"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.192513 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769\": container with ID starting with 8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769 not found: ID does not exist" containerID="8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.192545 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769"} err="failed to get container status \"8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769\": rpc error: code = NotFound desc = could not find container \"8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769\": container with ID starting with 8beafb3085ce3f7cc2cd183ad7c79c6303a49373d3d988a91eb499517531a769 not found: ID does not exist" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.192577 5032 scope.go:117] "RemoveContainer" containerID="dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.200958 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d0a99e7-360b-4ce1-bcc8-23aa20685880" (UID: "9d0a99e7-360b-4ce1-bcc8-23aa20685880"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.225848 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.226469 5032 scope.go:117] "RemoveContainer" containerID="ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238243 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238273 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp8j7\" (UniqueName: \"kubernetes.io/projected/90035fd6-32dd-45eb-9f94-2a113874fe82-kube-api-access-tp8j7\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238285 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/90035fd6-32dd-45eb-9f94-2a113874fe82-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238297 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbhd6\" (UniqueName: \"kubernetes.io/projected/833d74e1-afb3-429e-93c1-75904ea02347-kube-api-access-kbhd6\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238307 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238315 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90035fd6-32dd-45eb-9f94-2a113874fe82-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238323 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d0a99e7-360b-4ce1-bcc8-23aa20685880-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238331 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.238340 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/833d74e1-afb3-429e-93c1-75904ea02347-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.257074 5032 scope.go:117] "RemoveContainer" containerID="bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.278876 5032 scope.go:117] "RemoveContainer" containerID="dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e" Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.279552 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e\": container with ID starting with dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e not found: ID does not exist" containerID="dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.279620 5032 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e"} err="failed to get container status \"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e\": rpc error: code = NotFound desc = could not find container \"dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e\": container with ID starting with dd8a15c95f91f09994c9d0558c8c483dc2569f57df23d995e685e2e5eb27158e not found: ID does not exist" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.279675 5032 scope.go:117] "RemoveContainer" containerID="ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec" Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.280160 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec\": container with ID starting with ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec not found: ID does not exist" containerID="ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.280204 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec"} err="failed to get container status \"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec\": rpc error: code = NotFound desc = could not find container \"ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec\": container with ID starting with ae3612a19ca02973a7c7b23e11622fd3a969fb3babb1c4a02afc25d9e777a8ec not found: ID does not exist" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.280220 5032 scope.go:117] "RemoveContainer" containerID="bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c" Jan 22 16:08:40 crc kubenswrapper[5032]: E0122 16:08:40.280634 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c\": container with ID starting with bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c not found: ID does not exist" containerID="bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.280693 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c"} err="failed to get container status \"bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c\": rpc error: code = NotFound desc = could not find container \"bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c\": container with ID starting with bfa898c400c2a29d6225127219537149dfffc8a6e06db206d034be0b1f37677c not found: ID does not exist" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.280735 5032 scope.go:117] "RemoveContainer" containerID="c6687a03490ba3b1bb5992f90a64b09bbda947174a99fda6407e0a63e76b766f" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.297587 5032 scope.go:117] "RemoveContainer" containerID="ea23296437f10a0a8b60e97479b71769cf6f36aeb7c9418c3930afe8ff458daf" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.307561 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.309246 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.339714 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content\") pod \"a130b809-3234-4d66-a8b8-1fcd5651244b\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.340186 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7k6t\" (UniqueName: \"kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t\") pod \"a130b809-3234-4d66-a8b8-1fcd5651244b\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.340299 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities\") pod \"a130b809-3234-4d66-a8b8-1fcd5651244b\" (UID: \"a130b809-3234-4d66-a8b8-1fcd5651244b\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.341258 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities" (OuterVolumeSpecName: "utilities") pod "a130b809-3234-4d66-a8b8-1fcd5651244b" (UID: "a130b809-3234-4d66-a8b8-1fcd5651244b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.342939 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t" (OuterVolumeSpecName: "kube-api-access-k7k6t") pod "a130b809-3234-4d66-a8b8-1fcd5651244b" (UID: "a130b809-3234-4d66-a8b8-1fcd5651244b"). InnerVolumeSpecName "kube-api-access-k7k6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.441463 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content\") pod \"d2abfe75-3d69-4043-add3-91011758e245\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.441651 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qlnc\" (UniqueName: \"kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc\") pod \"d2abfe75-3d69-4043-add3-91011758e245\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.441780 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities\") pod \"d2abfe75-3d69-4043-add3-91011758e245\" (UID: \"d2abfe75-3d69-4043-add3-91011758e245\") " Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.442106 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7k6t\" (UniqueName: \"kubernetes.io/projected/a130b809-3234-4d66-a8b8-1fcd5651244b-kube-api-access-k7k6t\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.442174 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.442729 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.442956 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities" (OuterVolumeSpecName: "utilities") pod "d2abfe75-3d69-4043-add3-91011758e245" (UID: "d2abfe75-3d69-4043-add3-91011758e245"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.446706 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q67l2"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.467812 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc" (OuterVolumeSpecName: "kube-api-access-2qlnc") pod "d2abfe75-3d69-4043-add3-91011758e245" (UID: "d2abfe75-3d69-4043-add3-91011758e245"). InnerVolumeSpecName "kube-api-access-2qlnc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.495746 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13e7c5db-8678-49ac-8d52-290b60b99bbf" path="/var/lib/kubelet/pods/13e7c5db-8678-49ac-8d52-290b60b99bbf/volumes" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.496791 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="833d74e1-afb3-429e-93c1-75904ea02347" path="/var/lib/kubelet/pods/833d74e1-afb3-429e-93c1-75904ea02347/volumes" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.505412 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.505457 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9p297"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.511497 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.513955 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a130b809-3234-4d66-a8b8-1fcd5651244b" (UID: "a130b809-3234-4d66-a8b8-1fcd5651244b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.516022 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6bdf58b696-xp46g"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.538991 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.543446 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a130b809-3234-4d66-a8b8-1fcd5651244b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.543480 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qlnc\" (UniqueName: \"kubernetes.io/projected/d2abfe75-3d69-4043-add3-91011758e245-kube-api-access-2qlnc\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.543493 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:40 crc kubenswrapper[5032]: W0122 16:08:40.543781 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33243934_a53d_4e7e_85ad_d25cbd64218b.slice/crio-bdde54b5091b1730e4c277c54a3ba597dc11828b27b20738d9e326008ff2b104 WatchSource:0}: Error finding container bdde54b5091b1730e4c277c54a3ba597dc11828b27b20738d9e326008ff2b104: Status 404 returned error can't find the container with id bdde54b5091b1730e4c277c54a3ba597dc11828b27b20738d9e326008ff2b104 Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.545359 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content" (OuterVolumeSpecName: "catalog-content") pod 
"d2abfe75-3d69-4043-add3-91011758e245" (UID: "d2abfe75-3d69-4043-add3-91011758e245"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:08:40 crc kubenswrapper[5032]: I0122 16:08:40.644781 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2abfe75-3d69-4043-add3-91011758e245-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.170476 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" event={"ID":"33243934-a53d-4e7e-85ad-d25cbd64218b","Type":"ContainerStarted","Data":"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b"} Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.170534 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" event={"ID":"33243934-a53d-4e7e-85ad-d25cbd64218b","Type":"ContainerStarted","Data":"bdde54b5091b1730e4c277c54a3ba597dc11828b27b20738d9e326008ff2b104"} Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.170941 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.177718 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5f9qk" event={"ID":"d2abfe75-3d69-4043-add3-91011758e245","Type":"ContainerDied","Data":"be9982c7d224e0285aff1f2f39d4129512ff0271ffe82dba87253e5b614909ae"} Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.177803 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5f9qk" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.177839 5032 scope.go:117] "RemoveContainer" containerID="90f5550e41a6da2d69b0d684cc08ca2de909e7ab207d2ed0bee2c74751da6838" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.182083 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.186782 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g7g9c" event={"ID":"a130b809-3234-4d66-a8b8-1fcd5651244b","Type":"ContainerDied","Data":"50216c976e0c71543f20f7cfee9610229bfc3cfc4577764626e49a4350817de3"} Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.187029 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g7g9c" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.195837 5032 scope.go:117] "RemoveContainer" containerID="c641a494c23dbbd7aa3f4884cabc05a42fb5ec20bc773e57469e567ad60dbb9b" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.203310 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" podStartSLOduration=4.203289192 podStartE2EDuration="4.203289192s" podCreationTimestamp="2026-01-22 16:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:41.201796853 +0000 UTC m=+225.013965904" watchObservedRunningTime="2026-01-22 16:08:41.203289192 +0000 UTC m=+225.015458223" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.220635 5032 scope.go:117] "RemoveContainer" containerID="007988985cf44a1dda214c6f0b47be1ed8b479670ce53ddce5feb531e8987cd7" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.258249 5032 scope.go:117] "RemoveContainer" containerID="8809282e82ef2da7c3553650a0b4ee8bdeb949147b61f4ae75f2508d6d1612f5" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.266655 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.268428 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5f9qk"] Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.283926 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.290612 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g7g9c"] Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.295995 5032 scope.go:117] "RemoveContainer" containerID="684b52166849a36ca909f7223bd253542253bfae9bf313a04bc2c5880ee1025c" Jan 22 16:08:41 crc kubenswrapper[5032]: I0122 16:08:41.314038 5032 scope.go:117] "RemoveContainer" containerID="edda33e2f4fbfdf76c48363b5a076c851e94afe5ee931adb83fec8bd826cbdcd" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.462223 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90035fd6-32dd-45eb-9f94-2a113874fe82" path="/var/lib/kubelet/pods/90035fd6-32dd-45eb-9f94-2a113874fe82/volumes" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.464236 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" path="/var/lib/kubelet/pods/9d0a99e7-360b-4ce1-bcc8-23aa20685880/volumes" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.465458 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" path="/var/lib/kubelet/pods/a130b809-3234-4d66-a8b8-1fcd5651244b/volumes" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.467609 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2abfe75-3d69-4043-add3-91011758e245" path="/var/lib/kubelet/pods/d2abfe75-3d69-4043-add3-91011758e245/volumes" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730246 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730561 5032 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730582 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730600 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90035fd6-32dd-45eb-9f94-2a113874fe82" containerName="controller-manager" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730612 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="90035fd6-32dd-45eb-9f94-2a113874fe82" containerName="controller-manager" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730626 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730638 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730668 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730681 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730694 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730706 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730729 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730741 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730755 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730767 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730786 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730797 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730814 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730826 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="extract-content" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 
16:08:42.730844 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730855 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730912 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730924 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.730940 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.730982 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="extract-utilities" Jan 22 16:08:42 crc kubenswrapper[5032]: E0122 16:08:42.731001 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731013 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731169 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="90035fd6-32dd-45eb-9f94-2a113874fe82" containerName="controller-manager" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731203 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a130b809-3234-4d66-a8b8-1fcd5651244b" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731217 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2abfe75-3d69-4043-add3-91011758e245" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731234 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="833d74e1-afb3-429e-93c1-75904ea02347" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731251 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d0a99e7-360b-4ce1-bcc8-23aa20685880" containerName="registry-server" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.731931 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.735135 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.735808 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.738224 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.740297 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.745753 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.745753 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.750729 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.755510 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.780441 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9g59\" (UniqueName: \"kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.780590 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.780645 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.780716 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.780916 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.882637 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.882736 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.882783 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.882880 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.882984 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9g59\" (UniqueName: \"kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.886073 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.889978 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.900128 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " 
pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.907813 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:42 crc kubenswrapper[5032]: I0122 16:08:42.936832 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9g59\" (UniqueName: \"kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59\") pod \"controller-manager-7ff9499cfd-cms2b\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:43 crc kubenswrapper[5032]: I0122 16:08:43.061111 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:43 crc kubenswrapper[5032]: I0122 16:08:43.585558 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:08:44 crc kubenswrapper[5032]: I0122 16:08:44.222902 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" event={"ID":"84e4c279-67c5-47c4-a681-52566894bec9","Type":"ContainerStarted","Data":"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124"} Jan 22 16:08:44 crc kubenswrapper[5032]: I0122 16:08:44.223402 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" event={"ID":"84e4c279-67c5-47c4-a681-52566894bec9","Type":"ContainerStarted","Data":"7fd7d5f86bbb4b1372f49900fb036f8eff3b8772c1d3014b738139bb6779d1a0"} Jan 22 16:08:44 crc kubenswrapper[5032]: I0122 16:08:44.223800 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:44 crc kubenswrapper[5032]: I0122 16:08:44.248982 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:08:44 crc kubenswrapper[5032]: I0122 16:08:44.272644 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" podStartSLOduration=7.272619518 podStartE2EDuration="7.272619518s" podCreationTimestamp="2026-01-22 16:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:08:44.247564839 +0000 UTC m=+228.059733870" watchObservedRunningTime="2026-01-22 16:08:44.272619518 +0000 UTC m=+228.084788549" Jan 22 16:08:48 crc kubenswrapper[5032]: I0122 16:08:48.873734 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" containerName="oauth-openshift" containerID="cri-o://0034ac84dfebb3512ba4623425af990599deb3fe8246700a715286b51def682b" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.292089 5032 generic.go:334] "Generic (PLEG): container finished" podID="6ed09764-a5a8-450e-8592-a4e487efecc6" 
containerID="0034ac84dfebb3512ba4623425af990599deb3fe8246700a715286b51def682b" exitCode=0 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.292229 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" event={"ID":"6ed09764-a5a8-450e-8592-a4e487efecc6","Type":"ContainerDied","Data":"0034ac84dfebb3512ba4623425af990599deb3fe8246700a715286b51def682b"} Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.439082 5032 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.440250 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.440363 5032 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.441033 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.441091 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.441122 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.441035 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.441195 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436" gracePeriod=15 Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.443520 5032 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444042 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444092 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444119 5032 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444133 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444147 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444159 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444178 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444190 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444216 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444229 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444249 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444262 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444278 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444290 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444520 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444539 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444557 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444573 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444592 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444608 5032 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444623 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.444816 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.444833 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486197 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486287 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486540 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486601 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486621 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486701 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.486768 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.494772 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.530398 5032 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.587873 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5vrp\" (UniqueName: \"kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.587934 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.587996 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588023 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588055 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588099 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588127 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588149 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588176 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588203 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588240 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588259 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588277 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588307 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session\") pod \"6ed09764-a5a8-450e-8592-a4e487efecc6\" (UID: \"6ed09764-a5a8-450e-8592-a4e487efecc6\") " Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588453 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588487 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588510 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588528 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588554 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588582 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588666 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588687 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.588755 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.590949 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.592505 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593159 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593167 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593343 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593364 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593425 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593617 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593727 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.593885 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.595216 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.598440 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp" (OuterVolumeSpecName: "kube-api-access-c5vrp") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "kube-api-access-c5vrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.598561 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.600086 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.601031 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.601171 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.601314 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.601711 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.602372 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.603837 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "6ed09764-a5a8-450e-8592-a4e487efecc6" (UID: "6ed09764-a5a8-450e-8592-a4e487efecc6"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.621823 5032 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.626511 5032 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.631641 5032 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.632570 5032 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.633578 5032 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.633666 5032 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.634316 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="200ms" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691643 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691713 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691737 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691757 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691781 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 
22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691801 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691823 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691846 5032 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691894 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691918 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.691943 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5vrp\" (UniqueName: \"kubernetes.io/projected/6ed09764-a5a8-450e-8592-a4e487efecc6-kube-api-access-c5vrp\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.692258 5032 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ed09764-a5a8-450e-8592-a4e487efecc6-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.692282 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.692306 5032 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ed09764-a5a8-450e-8592-a4e487efecc6-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:49 crc kubenswrapper[5032]: I0122 16:08:49.831171 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.835831 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="400ms" Jan 22 16:08:49 crc kubenswrapper[5032]: E0122 16:08:49.872482 5032 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188d196043cc49b0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,LastTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 16:08:50 crc kubenswrapper[5032]: E0122 16:08:50.237368 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="800ms" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.301918 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.304107 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.305325 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52" exitCode=0 Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.305375 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8" exitCode=0 Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.305390 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008" exitCode=0 Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.305402 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436" exitCode=2 Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.305471 5032 scope.go:117] "RemoveContainer" 
containerID="4e5e0a0481a5dd319caf43dd6a249319c99ed66934505fb4ed3e5e731c3645b0" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.307814 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"057ee5d7b1d336e7a8198fea1b0e2753b6ff447dcd84b9b7c309c35462aec27b"} Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.307882 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"bedc940a4946dca5271440e5a8d7cbcd13a52939f58e62267c116cc8c48375cd"} Jan 22 16:08:50 crc kubenswrapper[5032]: E0122 16:08:50.308696 5032 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.311505 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" event={"ID":"6ed09764-a5a8-450e-8592-a4e487efecc6","Type":"ContainerDied","Data":"6563076053b81de010939ef4da32cc2501e45a77317d8f11e41756776d145176"} Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.311576 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.316628 5032 generic.go:334] "Generic (PLEG): container finished" podID="308679bd-4ba6-4266-b7af-af98812c290f" containerID="8670b4bffc1f877c75ca4c7efd16ac59cb7c18058362407b96edd4c17d08869f" exitCode=0 Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.316665 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"308679bd-4ba6-4266-b7af-af98812c290f","Type":"ContainerDied","Data":"8670b4bffc1f877c75ca4c7efd16ac59cb7c18058362407b96edd4c17d08869f"} Jan 22 16:08:50 crc kubenswrapper[5032]: I0122 16:08:50.353190 5032 scope.go:117] "RemoveContainer" containerID="0034ac84dfebb3512ba4623425af990599deb3fe8246700a715286b51def682b" Jan 22 16:08:51 crc kubenswrapper[5032]: E0122 16:08:51.039671 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="1.6s" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.331964 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 22 16:08:51 crc kubenswrapper[5032]: E0122 16:08:51.755154 5032 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188d196043cc49b0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,LastTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.903131 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.927138 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access\") pod \"308679bd-4ba6-4266-b7af-af98812c290f\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.927391 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock\") pod \"308679bd-4ba6-4266-b7af-af98812c290f\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.927466 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock" (OuterVolumeSpecName: "var-lock") pod "308679bd-4ba6-4266-b7af-af98812c290f" (UID: "308679bd-4ba6-4266-b7af-af98812c290f"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.927619 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir\") pod \"308679bd-4ba6-4266-b7af-af98812c290f\" (UID: \"308679bd-4ba6-4266-b7af-af98812c290f\") " Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.927842 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "308679bd-4ba6-4266-b7af-af98812c290f" (UID: "308679bd-4ba6-4266-b7af-af98812c290f"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.928169 5032 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-var-lock\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.928284 5032 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/308679bd-4ba6-4266-b7af-af98812c290f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:51 crc kubenswrapper[5032]: I0122 16:08:51.935028 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "308679bd-4ba6-4266-b7af-af98812c290f" (UID: "308679bd-4ba6-4266-b7af-af98812c290f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.030574 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/308679bd-4ba6-4266-b7af-af98812c290f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.341402 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.342655 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.342766 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"308679bd-4ba6-4266-b7af-af98812c290f","Type":"ContainerDied","Data":"37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb"} Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.342829 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37944e2386aa7cca1ed3e060cc8e101b4731e39d671245c71aa20e005140afeb" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.342786 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.347137 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.348366 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b" exitCode=0 Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.348439 5032 scope.go:117] "RemoveContainer" containerID="419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.382636 5032 scope.go:117] "RemoveContainer" containerID="3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.407610 5032 scope.go:117] "RemoveContainer" containerID="216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.429689 5032 scope.go:117] "RemoveContainer" containerID="4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.435721 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.435923 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.435923 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.435974 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.435995 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.436122 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.436382 5032 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.436433 5032 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.436455 5032 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.455128 5032 scope.go:117] "RemoveContainer" containerID="cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.466091 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.483309 5032 scope.go:117] "RemoveContainer" containerID="e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.521743 5032 scope.go:117] "RemoveContainer" containerID="419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.523320 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\": container with ID starting with 419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52 not found: ID does not exist" containerID="419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.523409 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52"} err="failed to get container status \"419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\": rpc error: code = NotFound desc = could not find container \"419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52\": container with ID starting with 419cd26c8b37c7abc8d3e336af51b7ab289162621fc49ee9dd992c0e1c3b8d52 not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.523460 5032 scope.go:117] "RemoveContainer" containerID="3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.524427 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\": container with ID starting with 3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8 not found: ID does not exist" containerID="3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.524486 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8"} err="failed to get 
container status \"3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\": rpc error: code = NotFound desc = could not find container \"3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8\": container with ID starting with 3fa9c357340db480c17d50deb99ed4c981e8b647432718299207e1880c410ad8 not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.524521 5032 scope.go:117] "RemoveContainer" containerID="216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.525206 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\": container with ID starting with 216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008 not found: ID does not exist" containerID="216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.525252 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008"} err="failed to get container status \"216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\": rpc error: code = NotFound desc = could not find container \"216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008\": container with ID starting with 216dc7e694ae43a19656385e2e702c6a928d7206cddbf4d98d527ba70e23b008 not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.525282 5032 scope.go:117] "RemoveContainer" containerID="4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.525790 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\": container with ID starting with 4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436 not found: ID does not exist" containerID="4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.525854 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436"} err="failed to get container status \"4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\": rpc error: code = NotFound desc = could not find container \"4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436\": container with ID starting with 4c2940e99770d1fefabfde5629af84c78910ff29b0361a7e33bcde651cf95436 not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.525953 5032 scope.go:117] "RemoveContainer" containerID="cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.526442 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\": container with ID starting with cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b not found: ID does not exist" containerID="cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.526494 5032 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b"} err="failed to get container status \"cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\": rpc error: code = NotFound desc = could not find container \"cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b\": container with ID starting with cf5a5e0511a74d48a067d8706c0493263a3587d5aec5797ff7e2b0ca4a6cce4b not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.526527 5032 scope.go:117] "RemoveContainer" containerID="e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.527033 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\": container with ID starting with e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07 not found: ID does not exist" containerID="e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07" Jan 22 16:08:52 crc kubenswrapper[5032]: I0122 16:08:52.527080 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07"} err="failed to get container status \"e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\": rpc error: code = NotFound desc = could not find container \"e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07\": container with ID starting with e2635d2f166511f2630560378c3570f8e0cae7f05d6a4e389e828dbca3be5d07 not found: ID does not exist" Jan 22 16:08:52 crc kubenswrapper[5032]: E0122 16:08:52.640186 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="3.2s" Jan 22 16:08:53 crc kubenswrapper[5032]: I0122 16:08:53.356585 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:08:54 crc kubenswrapper[5032]: I0122 16:08:54.497492 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:54 crc kubenswrapper[5032]: I0122 16:08:54.498700 5032 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:54 crc kubenswrapper[5032]: I0122 16:08:54.499111 5032 status_manager.go:851] "Failed to get status for pod" podUID="308679bd-4ba6-4266-b7af-af98812c290f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:54 crc kubenswrapper[5032]: I0122 16:08:54.499270 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:55 crc kubenswrapper[5032]: E0122 16:08:55.841190 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="6.4s" Jan 22 16:08:56 crc kubenswrapper[5032]: I0122 16:08:56.461277 5032 status_manager.go:851] "Failed to get status for pod" podUID="308679bd-4ba6-4266-b7af-af98812c290f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:08:56 crc kubenswrapper[5032]: I0122 16:08:56.462162 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.454645 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.456469 5032 status_manager.go:851] "Failed to get status for pod" podUID="308679bd-4ba6-4266-b7af-af98812c290f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.457308 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.470808 5032 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.470884 5032 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:01 crc kubenswrapper[5032]: E0122 16:09:01.471493 5032 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:01 crc kubenswrapper[5032]: I0122 16:09:01.472347 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:01 crc kubenswrapper[5032]: W0122 16:09:01.505561 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-78a6c618732b3dbbc830b9942d62af541719b2f58123dd322b359e366eabb302 WatchSource:0}: Error finding container 78a6c618732b3dbbc830b9942d62af541719b2f58123dd322b359e366eabb302: Status 404 returned error can't find the container with id 78a6c618732b3dbbc830b9942d62af541719b2f58123dd322b359e366eabb302 Jan 22 16:09:01 crc kubenswrapper[5032]: E0122 16:09:01.758156 5032 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188d196043cc49b0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,LastTimestamp:2026-01-22 16:08:49.871489456 +0000 UTC m=+233.683658517,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 16:09:02 crc kubenswrapper[5032]: E0122 16:09:02.242564 5032 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="7s" Jan 22 16:09:02 crc kubenswrapper[5032]: I0122 16:09:02.416218 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"78a6c618732b3dbbc830b9942d62af541719b2f58123dd322b359e366eabb302"} Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.454830 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.455171 5032 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f2cf10cfa5d3f824d929ecbffda4badc3bd82ca69f42ed83f0081f13c2e05666" exitCode=1 Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.455246 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f2cf10cfa5d3f824d929ecbffda4badc3bd82ca69f42ed83f0081f13c2e05666"} Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.455822 5032 scope.go:117] "RemoveContainer" containerID="f2cf10cfa5d3f824d929ecbffda4badc3bd82ca69f42ed83f0081f13c2e05666" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.456143 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.456331 5032 status_manager.go:851] "Failed to get status for pod" podUID="308679bd-4ba6-4266-b7af-af98812c290f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.456589 5032 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.457481 5032 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="9d9da93d5cdc198e825d87a7dcf7f00a48aaffc3ab509e6c87fe260f21369089" exitCode=0 Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.457502 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"9d9da93d5cdc198e825d87a7dcf7f00a48aaffc3ab509e6c87fe260f21369089"} Jan 22 16:09:05 crc 
kubenswrapper[5032]: I0122 16:09:05.457992 5032 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.458040 5032 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.458400 5032 status_manager.go:851] "Failed to get status for pod" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" pod="openshift-authentication/oauth-openshift-558db77b4-db6bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-db6bl\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:05 crc kubenswrapper[5032]: E0122 16:09:05.458546 5032 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.459035 5032 status_manager.go:851] "Failed to get status for pod" podUID="308679bd-4ba6-4266-b7af-af98812c290f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:05 crc kubenswrapper[5032]: I0122 16:09:05.459430 5032 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Jan 22 16:09:06 crc kubenswrapper[5032]: I0122 16:09:06.479478 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cef17a6e13520bcd7bd647090a36de9d4668aab7e83a12ea859cc254316945ac"} Jan 22 16:09:06 crc kubenswrapper[5032]: I0122 16:09:06.479790 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"58c44271f0043f51fbb358b79b36d57d4ee3a469187b95dae78cfd78a5bb9484"} Jan 22 16:09:06 crc kubenswrapper[5032]: I0122 16:09:06.479805 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fbaa5f139ef89188155ad87188de11242f48c5f49c0080765462faadc89a8bc8"} Jan 22 16:09:06 crc kubenswrapper[5032]: I0122 16:09:06.485617 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 16:09:06 crc kubenswrapper[5032]: I0122 16:09:06.485664 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4b45f1bbbbb0e683d706db618e75835072e319c110dd110191425602213ab312"} Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 
16:09:07.130948 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 16:09:07.494099 5032 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 16:09:07.494132 5032 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 16:09:07.494280 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d8c2198e607aaf6975f375311b384c540311872e2d91e0d4b6b4ec83976c54a8"} Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 16:09:07.494300 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7a83fd90f68e961618f79470841cd48c3a1a46864aa535b8efa685acbc09f9c6"} Jan 22 16:09:07 crc kubenswrapper[5032]: I0122 16:09:07.494324 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:10 crc kubenswrapper[5032]: I0122 16:09:10.761720 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:09:10 crc kubenswrapper[5032]: I0122 16:09:10.767789 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:09:11 crc kubenswrapper[5032]: I0122 16:09:11.472919 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:11 crc kubenswrapper[5032]: I0122 16:09:11.472998 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:11 crc kubenswrapper[5032]: I0122 16:09:11.480695 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:12 crc kubenswrapper[5032]: I0122 16:09:12.499145 5032 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:13 crc kubenswrapper[5032]: I0122 16:09:13.538404 5032 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:13 crc kubenswrapper[5032]: I0122 16:09:13.538442 5032 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:13 crc kubenswrapper[5032]: I0122 16:09:13.542893 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:13 crc kubenswrapper[5032]: I0122 16:09:13.546069 5032 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="19dd7177-41df-41c0-b8ca-a083d726a1e7" Jan 22 16:09:14 crc kubenswrapper[5032]: I0122 16:09:14.544275 5032 kubelet.go:1909] "Trying to delete pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:14 crc kubenswrapper[5032]: I0122 16:09:14.544320 5032 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="378ad6bf-0f91-402f-a16b-76968884acea" Jan 22 16:09:16 crc kubenswrapper[5032]: I0122 16:09:16.473147 5032 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="19dd7177-41df-41c0-b8ca-a083d726a1e7" Jan 22 16:09:17 crc kubenswrapper[5032]: I0122 16:09:17.137907 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 16:09:21 crc kubenswrapper[5032]: I0122 16:09:21.871938 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:21 crc kubenswrapper[5032]: I0122 16:09:21.871968 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 22 16:09:22 crc kubenswrapper[5032]: I0122 16:09:22.641110 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 22 16:09:22 crc kubenswrapper[5032]: I0122 16:09:22.657943 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 22 16:09:22 crc kubenswrapper[5032]: I0122 16:09:22.868143 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 16:09:22 crc kubenswrapper[5032]: I0122 16:09:22.909100 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.033759 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.201624 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.407333 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.412719 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.679829 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.692205 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 22 16:09:23 crc kubenswrapper[5032]: I0122 16:09:23.714226 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.078815 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.130817 5032 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"etcd-client" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.334451 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.513566 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.535930 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.726726 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 16:09:24 crc kubenswrapper[5032]: I0122 16:09:24.853519 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.249195 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.289094 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.361943 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.409920 5032 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.537351 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.558972 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.620252 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.647886 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.688719 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.703648 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.726482 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 22 16:09:25 crc kubenswrapper[5032]: I0122 16:09:25.944615 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.034049 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.140566 5032 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.167278 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.167296 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.205443 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.224433 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.338538 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.339534 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.483928 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.503779 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.507320 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.677193 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.693982 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.758063 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 22 16:09:26 crc kubenswrapper[5032]: I0122 16:09:26.933626 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.082674 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.167183 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.184940 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.301953 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.332043 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.389988 
5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.476458 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.516956 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.709914 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.872593 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.886590 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.906768 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 16:09:27 crc kubenswrapper[5032]: I0122 16:09:27.928254 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.178076 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.267117 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.304059 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.440290 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.577000 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.717565 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.881708 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.903025 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 22 16:09:28 crc kubenswrapper[5032]: I0122 16:09:28.978804 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.142586 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.170599 5032 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.214980 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.252479 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.289072 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.307649 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.330169 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.348075 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.353107 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.472227 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.518216 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.619961 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.649368 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.691350 5032 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.691541 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.759139 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.920751 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.927053 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.937477 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.938978 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.942399 5032 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"kube-root-ca.crt" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.982481 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 16:09:29 crc kubenswrapper[5032]: I0122 16:09:29.989032 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.035641 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.050619 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.063347 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.102915 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.123410 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.146572 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.197101 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.227599 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.284296 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.295951 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.335355 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.408125 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.421398 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.575254 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.666246 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.814165 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.816538 
5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.827582 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.882757 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.902995 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 22 16:09:30 crc kubenswrapper[5032]: I0122 16:09:30.976278 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.195659 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.326489 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.357272 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.406528 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.468351 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.506996 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.542056 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.579108 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.672002 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.690442 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.756119 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.851282 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.892306 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.910244 5032 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"console-serving-cert" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.941184 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.973351 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 22 16:09:31 crc kubenswrapper[5032]: I0122 16:09:31.987892 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.074377 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.130356 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.175036 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.262938 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.289614 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.354647 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.368169 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.415057 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.443086 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.569744 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.574111 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.588469 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.606955 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.710922 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.741146 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.748231 5032 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.769151 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.801700 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.824978 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.859295 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 16:09:32 crc kubenswrapper[5032]: I0122 16:09:32.944932 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.012777 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.117306 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.145694 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.253422 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.265931 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.272334 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.274905 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.293111 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.295225 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.344403 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.352252 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.464280 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.483501 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.495477 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 22 16:09:33 crc 
kubenswrapper[5032]: I0122 16:09:33.667316 5032 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.775667 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 22 16:09:33 crc kubenswrapper[5032]: I0122 16:09:33.895084 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.037819 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.058547 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.076293 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.090975 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.200390 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.457484 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.507589 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.528601 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.564455 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.570549 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.590849 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.629074 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.630107 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.792714 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.910524 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 16:09:34.927447 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 22 16:09:34 crc kubenswrapper[5032]: I0122 
16:09:34.936531 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.145506 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.177899 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.245628 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.311127 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.413211 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.416121 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.518194 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.525882 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.556208 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.560084 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.562709 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.567756 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.641250 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.698977 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.706082 5032 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.756768 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.768191 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.805296 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 22 16:09:35 crc kubenswrapper[5032]: I0122 16:09:35.849234 5032 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:35.925259 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.017355 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.222350 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.268696 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.315931 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.321664 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.330275 5032 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.338627 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-db6bl"] Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.338713 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.345999 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.370578 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.370542722 podStartE2EDuration="24.370542722s" podCreationTimestamp="2026-01-22 16:09:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:09:36.363810767 +0000 UTC m=+280.175979838" watchObservedRunningTime="2026-01-22 16:09:36.370542722 +0000 UTC m=+280.182711803" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.411852 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.465477 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" path="/var/lib/kubelet/pods/6ed09764-a5a8-450e-8592-a4e487efecc6/volumes" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.718567 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.787508 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.827228 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.876755 5032 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.878265 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.898282 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 22 16:09:36 crc kubenswrapper[5032]: I0122 16:09:36.956247 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.073447 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.144391 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.263939 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.310669 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.311397 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.459020 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.490797 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.508891 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.593716 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.612805 5032 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.684847 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.820341 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.820599 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" podUID="84e4c279-67c5-47c4-a681-52566894bec9" containerName="controller-manager" containerID="cri-o://07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124" gracePeriod=30 Jan 22 16:09:37 crc kubenswrapper[5032]: I0122 16:09:37.832489 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:09:37 crc 
kubenswrapper[5032]: I0122 16:09:37.832876 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" podUID="33243934-a53d-4e7e-85ad-d25cbd64218b" containerName="route-controller-manager" containerID="cri-o://ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b" gracePeriod=30 Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.326282 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.329819 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398086 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pv4cr\" (UniqueName: \"kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr\") pod \"33243934-a53d-4e7e-85ad-d25cbd64218b\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398175 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9g59\" (UniqueName: \"kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59\") pod \"84e4c279-67c5-47c4-a681-52566894bec9\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398241 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca\") pod \"33243934-a53d-4e7e-85ad-d25cbd64218b\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398263 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles\") pod \"84e4c279-67c5-47c4-a681-52566894bec9\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398292 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config\") pod \"33243934-a53d-4e7e-85ad-d25cbd64218b\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398309 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config\") pod \"84e4c279-67c5-47c4-a681-52566894bec9\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398326 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert\") pod \"84e4c279-67c5-47c4-a681-52566894bec9\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398373 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert\") pod 
\"33243934-a53d-4e7e-85ad-d25cbd64218b\" (UID: \"33243934-a53d-4e7e-85ad-d25cbd64218b\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.398393 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca\") pod \"84e4c279-67c5-47c4-a681-52566894bec9\" (UID: \"84e4c279-67c5-47c4-a681-52566894bec9\") " Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.399681 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca" (OuterVolumeSpecName: "client-ca") pod "84e4c279-67c5-47c4-a681-52566894bec9" (UID: "84e4c279-67c5-47c4-a681-52566894bec9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.402247 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config" (OuterVolumeSpecName: "config") pod "84e4c279-67c5-47c4-a681-52566894bec9" (UID: "84e4c279-67c5-47c4-a681-52566894bec9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.402234 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "84e4c279-67c5-47c4-a681-52566894bec9" (UID: "84e4c279-67c5-47c4-a681-52566894bec9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.402344 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca" (OuterVolumeSpecName: "client-ca") pod "33243934-a53d-4e7e-85ad-d25cbd64218b" (UID: "33243934-a53d-4e7e-85ad-d25cbd64218b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.403623 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config" (OuterVolumeSpecName: "config") pod "33243934-a53d-4e7e-85ad-d25cbd64218b" (UID: "33243934-a53d-4e7e-85ad-d25cbd64218b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.408195 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr" (OuterVolumeSpecName: "kube-api-access-pv4cr") pod "33243934-a53d-4e7e-85ad-d25cbd64218b" (UID: "33243934-a53d-4e7e-85ad-d25cbd64218b"). InnerVolumeSpecName "kube-api-access-pv4cr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.408662 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "33243934-a53d-4e7e-85ad-d25cbd64218b" (UID: "33243934-a53d-4e7e-85ad-d25cbd64218b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.409944 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59" (OuterVolumeSpecName: "kube-api-access-m9g59") pod "84e4c279-67c5-47c4-a681-52566894bec9" (UID: "84e4c279-67c5-47c4-a681-52566894bec9"). InnerVolumeSpecName "kube-api-access-m9g59". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.410085 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "84e4c279-67c5-47c4-a681-52566894bec9" (UID: "84e4c279-67c5-47c4-a681-52566894bec9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500820 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500895 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500911 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84e4c279-67c5-47c4-a681-52566894bec9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500930 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33243934-a53d-4e7e-85ad-d25cbd64218b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500941 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500954 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pv4cr\" (UniqueName: \"kubernetes.io/projected/33243934-a53d-4e7e-85ad-d25cbd64218b-kube-api-access-pv4cr\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500969 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9g59\" (UniqueName: \"kubernetes.io/projected/84e4c279-67c5-47c4-a681-52566894bec9-kube-api-access-m9g59\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500981 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/33243934-a53d-4e7e-85ad-d25cbd64218b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.500992 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84e4c279-67c5-47c4-a681-52566894bec9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.697591 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 22 16:09:38 crc 
kubenswrapper[5032]: I0122 16:09:38.729128 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.729134 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" event={"ID":"33243934-a53d-4e7e-85ad-d25cbd64218b","Type":"ContainerDied","Data":"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b"} Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.729317 5032 scope.go:117] "RemoveContainer" containerID="ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.729016 5032 generic.go:334] "Generic (PLEG): container finished" podID="33243934-a53d-4e7e-85ad-d25cbd64218b" containerID="ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b" exitCode=0 Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.730234 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt" event={"ID":"33243934-a53d-4e7e-85ad-d25cbd64218b","Type":"ContainerDied","Data":"bdde54b5091b1730e4c277c54a3ba597dc11828b27b20738d9e326008ff2b104"} Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.734551 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.734642 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" event={"ID":"84e4c279-67c5-47c4-a681-52566894bec9","Type":"ContainerDied","Data":"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124"} Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.734794 5032 generic.go:334] "Generic (PLEG): container finished" podID="84e4c279-67c5-47c4-a681-52566894bec9" containerID="07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124" exitCode=0 Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.734832 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9499cfd-cms2b" event={"ID":"84e4c279-67c5-47c4-a681-52566894bec9","Type":"ContainerDied","Data":"7fd7d5f86bbb4b1372f49900fb036f8eff3b8772c1d3014b738139bb6779d1a0"} Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.763898 5032 scope.go:117] "RemoveContainer" containerID="ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.764302 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:09:38 crc kubenswrapper[5032]: E0122 16:09:38.767123 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b\": container with ID starting with ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b not found: ID does not exist" containerID="ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.767187 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b"} err="failed to get container status \"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b\": rpc error: code = NotFound desc = could not find container \"ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b\": container with ID starting with ed938f0a34ea5a8ced8cf6670229c167f8d4a5af74adcb656d86de0d648ad67b not found: ID does not exist" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.768535 5032 scope.go:117] "RemoveContainer" containerID="07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.772140 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d598545f4-5q6kt"] Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.778559 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.784959 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9499cfd-cms2b"] Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.789841 5032 scope.go:117] "RemoveContainer" containerID="07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124" Jan 22 16:09:38 crc kubenswrapper[5032]: E0122 16:09:38.790690 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124\": container with ID starting with 07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124 not found: ID does not exist" containerID="07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124" Jan 22 16:09:38 crc kubenswrapper[5032]: I0122 16:09:38.790751 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124"} err="failed to get container status \"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124\": rpc error: code = NotFound desc = could not find container \"07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124\": container with ID starting with 07fb0ee8c60b98ab619178814dde367b2ad9c09447b5f3a98eee6aca1bd4e124 not found: ID does not exist" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.048055 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.762434 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:39 crc kubenswrapper[5032]: E0122 16:09:39.762837 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="308679bd-4ba6-4266-b7af-af98812c290f" containerName="installer" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.762892 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="308679bd-4ba6-4266-b7af-af98812c290f" containerName="installer" Jan 22 16:09:39 crc kubenswrapper[5032]: E0122 16:09:39.762918 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" containerName="oauth-openshift" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.762934 5032 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" containerName="oauth-openshift" Jan 22 16:09:39 crc kubenswrapper[5032]: E0122 16:09:39.762960 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84e4c279-67c5-47c4-a681-52566894bec9" containerName="controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.762974 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="84e4c279-67c5-47c4-a681-52566894bec9" containerName="controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: E0122 16:09:39.763005 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33243934-a53d-4e7e-85ad-d25cbd64218b" containerName="route-controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763023 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="33243934-a53d-4e7e-85ad-d25cbd64218b" containerName="route-controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763208 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="308679bd-4ba6-4266-b7af-af98812c290f" containerName="installer" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763234 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="33243934-a53d-4e7e-85ad-d25cbd64218b" containerName="route-controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763256 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed09764-a5a8-450e-8592-a4e487efecc6" containerName="oauth-openshift" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763279 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="84e4c279-67c5-47c4-a681-52566894bec9" containerName="controller-manager" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.763950 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.765045 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.765689 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.768582 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.769136 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.769677 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.770017 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.771538 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.771607 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.771650 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.772043 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.772470 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.772745 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.773218 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.773787 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.789032 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.799209 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.804947 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819016 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819107 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-jtm4x\" (UniqueName: \"kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819154 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819198 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819250 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819300 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819371 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819402 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbdlb\" (UniqueName: \"kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.819442 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920682 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920762 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbdlb\" (UniqueName: \"kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920794 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920836 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920894 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtm4x\" (UniqueName: \"kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920922 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920952 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.920975 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.921003 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: 
\"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.922752 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.923475 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.923508 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.924227 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.924325 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.929414 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.930805 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.944184 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbdlb\" (UniqueName: \"kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb\") pod \"route-controller-manager-54599f87c8-5h6rk\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:39 crc kubenswrapper[5032]: I0122 16:09:39.953391 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtm4x\" (UniqueName: \"kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x\") pod \"controller-manager-58679666bb-mmt2r\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.086781 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.102739 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.371709 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.464688 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33243934-a53d-4e7e-85ad-d25cbd64218b" path="/var/lib/kubelet/pods/33243934-a53d-4e7e-85ad-d25cbd64218b/volumes" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.466480 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84e4c279-67c5-47c4-a681-52566894bec9" path="/var/lib/kubelet/pods/84e4c279-67c5-47c4-a681-52566894bec9/volumes" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.621821 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.755611 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" event={"ID":"db6aadcd-20ad-46b9-b394-5173289a2299","Type":"ContainerStarted","Data":"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3"} Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.755675 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" event={"ID":"db6aadcd-20ad-46b9-b394-5173289a2299","Type":"ContainerStarted","Data":"d67ae08fb857cd68fa8dd63d01485216ed25496ec97d623bbc014e4c86c889f8"} Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.757017 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.759520 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" event={"ID":"dd075780-5f3f-4eac-a4ee-2ba19f7f596d","Type":"ContainerStarted","Data":"c91a95ecca5fcf852cedfc1964738975ea5022358f61f668e78774b754c341b1"} Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.764153 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7874f76df5-6x9zj"] Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.765153 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.765274 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.781245 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782022 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782197 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782246 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782320 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782350 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782373 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782357 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782848 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.782917 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.783795 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.784414 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.786178 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7874f76df5-6x9zj"] Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.805798 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.815963 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.828851 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833617 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-login\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " 
pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833680 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833715 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833750 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg69t\" (UniqueName: \"kubernetes.io/projected/19aeeb01-60c8-432f-8763-2f4477389955-kube-api-access-tg69t\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833775 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-error\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833803 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833842 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-audit-policies\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833906 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833936 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/19aeeb01-60c8-432f-8763-2f4477389955-audit-dir\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833920 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" podStartSLOduration=3.833899932 podStartE2EDuration="3.833899932s" podCreationTimestamp="2026-01-22 16:09:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:09:40.830768331 +0000 UTC m=+284.642937362" watchObservedRunningTime="2026-01-22 16:09:40.833899932 +0000 UTC m=+284.646068963" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833959 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.833994 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-service-ca\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.834044 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-session\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.834085 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-router-certs\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.834162 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936153 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " 
pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936226 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-login\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936265 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936300 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936347 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg69t\" (UniqueName: \"kubernetes.io/projected/19aeeb01-60c8-432f-8763-2f4477389955-kube-api-access-tg69t\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936376 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936405 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-error\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936453 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-audit-policies\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936494 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " 
pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936535 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/19aeeb01-60c8-432f-8763-2f4477389955-audit-dir\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936566 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936612 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-service-ca\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936646 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-session\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.936683 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-router-certs\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.937770 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.939692 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-audit-policies\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.939713 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-service-ca\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 
16:09:40.940702 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.946006 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/19aeeb01-60c8-432f-8763-2f4477389955-audit-dir\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.948210 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-error\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.948209 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-session\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.948757 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.948913 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-template-login\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.949773 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.952258 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.954336 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.957616 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg69t\" (UniqueName: \"kubernetes.io/projected/19aeeb01-60c8-432f-8763-2f4477389955-kube-api-access-tg69t\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:40 crc kubenswrapper[5032]: I0122 16:09:40.957603 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/19aeeb01-60c8-432f-8763-2f4477389955-v4-0-config-system-router-certs\") pod \"oauth-openshift-7874f76df5-6x9zj\" (UID: \"19aeeb01-60c8-432f-8763-2f4477389955\") " pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.080499 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.306267 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7874f76df5-6x9zj"] Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.779532 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" event={"ID":"19aeeb01-60c8-432f-8763-2f4477389955","Type":"ContainerStarted","Data":"7b3ef1d085e798a94315f2d1cb4404899ded02f9e3784ded4ff81bc037e3c44e"} Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.780033 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" event={"ID":"19aeeb01-60c8-432f-8763-2f4477389955","Type":"ContainerStarted","Data":"bd52e1928360141ce07e6b84e14a24bd2772351016d5e32f6974d68cc4480743"} Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.780508 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.782511 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" event={"ID":"dd075780-5f3f-4eac-a4ee-2ba19f7f596d","Type":"ContainerStarted","Data":"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de"} Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.782962 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.787546 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.806131 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" podStartSLOduration=78.806109174 podStartE2EDuration="1m18.806109174s" podCreationTimestamp="2026-01-22 16:08:23 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:09:41.803697741 +0000 UTC m=+285.615866782" watchObservedRunningTime="2026-01-22 16:09:41.806109174 +0000 UTC m=+285.618278195" Jan 22 16:09:41 crc kubenswrapper[5032]: I0122 16:09:41.823469 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" podStartSLOduration=4.823439212 podStartE2EDuration="4.823439212s" podCreationTimestamp="2026-01-22 16:09:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:09:41.82335974 +0000 UTC m=+285.635528811" watchObservedRunningTime="2026-01-22 16:09:41.823439212 +0000 UTC m=+285.635608283" Jan 22 16:09:42 crc kubenswrapper[5032]: I0122 16:09:42.222424 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7874f76df5-6x9zj" Jan 22 16:09:46 crc kubenswrapper[5032]: I0122 16:09:46.414022 5032 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 16:09:46 crc kubenswrapper[5032]: I0122 16:09:46.414269 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://057ee5d7b1d336e7a8198fea1b0e2753b6ff447dcd84b9b7c309c35462aec27b" gracePeriod=5 Jan 22 16:09:49 crc kubenswrapper[5032]: I0122 16:09:49.838724 5032 generic.go:334] "Generic (PLEG): container finished" podID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerID="af89b2f3a07b52d76cd04d87ad6aacb69443be78e0b12b6c5bb73fbddf14ab79" exitCode=0 Jan 22 16:09:49 crc kubenswrapper[5032]: I0122 16:09:49.838850 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" event={"ID":"fb8a5762-1a9d-4011-a049-592cd5f33244","Type":"ContainerDied","Data":"af89b2f3a07b52d76cd04d87ad6aacb69443be78e0b12b6c5bb73fbddf14ab79"} Jan 22 16:09:49 crc kubenswrapper[5032]: I0122 16:09:49.839562 5032 scope.go:117] "RemoveContainer" containerID="af89b2f3a07b52d76cd04d87ad6aacb69443be78e0b12b6c5bb73fbddf14ab79" Jan 22 16:09:50 crc kubenswrapper[5032]: I0122 16:09:50.845982 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" event={"ID":"fb8a5762-1a9d-4011-a049-592cd5f33244","Type":"ContainerStarted","Data":"b01b798396f66cfad5da65cdf44e810b8566a6178ca8b02c0cf847be47295420"} Jan 22 16:09:50 crc kubenswrapper[5032]: I0122 16:09:50.846747 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:09:50 crc kubenswrapper[5032]: I0122 16:09:50.850047 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:09:51 crc kubenswrapper[5032]: I0122 16:09:51.856181 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 16:09:51 crc kubenswrapper[5032]: I0122 16:09:51.857417 5032 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" 
containerID="057ee5d7b1d336e7a8198fea1b0e2753b6ff447dcd84b9b7c309c35462aec27b" exitCode=137 Jan 22 16:09:51 crc kubenswrapper[5032]: I0122 16:09:51.993901 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 16:09:51 crc kubenswrapper[5032]: I0122 16:09:51.994328 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.107546 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108037 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108216 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108396 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108585 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.107678 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108169 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.108317 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.109042 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.122736 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.210152 5032 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.210205 5032 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.210226 5032 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.210243 5032 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.210261 5032 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.466809 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.866545 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.866794 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 16:09:52 crc kubenswrapper[5032]: I0122 16:09:52.866799 5032 scope.go:117] "RemoveContainer" containerID="057ee5d7b1d336e7a8198fea1b0e2753b6ff447dcd84b9b7c309c35462aec27b" Jan 22 16:09:55 crc kubenswrapper[5032]: I0122 16:09:55.926151 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 16:09:56 crc kubenswrapper[5032]: I0122 16:09:56.333351 5032 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 22 16:09:57 crc kubenswrapper[5032]: I0122 16:09:57.729827 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 16:09:57 crc kubenswrapper[5032]: I0122 16:09:57.826778 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:57 crc kubenswrapper[5032]: I0122 16:09:57.829017 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" podUID="db6aadcd-20ad-46b9-b394-5173289a2299" containerName="controller-manager" containerID="cri-o://3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3" gracePeriod=30 Jan 22 16:09:57 crc kubenswrapper[5032]: I0122 16:09:57.842806 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:57 crc kubenswrapper[5032]: I0122 16:09:57.843179 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" podUID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" containerName="route-controller-manager" containerID="cri-o://0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de" gracePeriod=30 Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.340141 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.407566 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config\") pod \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.407712 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert\") pod \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.407988 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbdlb\" (UniqueName: \"kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb\") pod \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.408010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca\") pod \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\" (UID: \"dd075780-5f3f-4eac-a4ee-2ba19f7f596d\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.409234 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config" (OuterVolumeSpecName: "config") pod "dd075780-5f3f-4eac-a4ee-2ba19f7f596d" (UID: "dd075780-5f3f-4eac-a4ee-2ba19f7f596d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.409441 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca" (OuterVolumeSpecName: "client-ca") pod "dd075780-5f3f-4eac-a4ee-2ba19f7f596d" (UID: "dd075780-5f3f-4eac-a4ee-2ba19f7f596d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.415286 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb" (OuterVolumeSpecName: "kube-api-access-kbdlb") pod "dd075780-5f3f-4eac-a4ee-2ba19f7f596d" (UID: "dd075780-5f3f-4eac-a4ee-2ba19f7f596d"). InnerVolumeSpecName "kube-api-access-kbdlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.418024 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dd075780-5f3f-4eac-a4ee-2ba19f7f596d" (UID: "dd075780-5f3f-4eac-a4ee-2ba19f7f596d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.457240 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.511901 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca\") pod \"db6aadcd-20ad-46b9-b394-5173289a2299\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.511959 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert\") pod \"db6aadcd-20ad-46b9-b394-5173289a2299\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.511988 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config\") pod \"db6aadcd-20ad-46b9-b394-5173289a2299\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512054 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtm4x\" (UniqueName: \"kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x\") pod \"db6aadcd-20ad-46b9-b394-5173289a2299\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512084 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles\") pod \"db6aadcd-20ad-46b9-b394-5173289a2299\" (UID: \"db6aadcd-20ad-46b9-b394-5173289a2299\") " Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512296 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbdlb\" (UniqueName: \"kubernetes.io/projected/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-kube-api-access-kbdlb\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512308 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512317 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.512349 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd075780-5f3f-4eac-a4ee-2ba19f7f596d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.513037 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config" (OuterVolumeSpecName: "config") pod "db6aadcd-20ad-46b9-b394-5173289a2299" (UID: "db6aadcd-20ad-46b9-b394-5173289a2299"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.513229 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "db6aadcd-20ad-46b9-b394-5173289a2299" (UID: "db6aadcd-20ad-46b9-b394-5173289a2299"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.513618 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca" (OuterVolumeSpecName: "client-ca") pod "db6aadcd-20ad-46b9-b394-5173289a2299" (UID: "db6aadcd-20ad-46b9-b394-5173289a2299"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.515939 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "db6aadcd-20ad-46b9-b394-5173289a2299" (UID: "db6aadcd-20ad-46b9-b394-5173289a2299"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.516964 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x" (OuterVolumeSpecName: "kube-api-access-jtm4x") pod "db6aadcd-20ad-46b9-b394-5173289a2299" (UID: "db6aadcd-20ad-46b9-b394-5173289a2299"). InnerVolumeSpecName "kube-api-access-jtm4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.614312 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.614353 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db6aadcd-20ad-46b9-b394-5173289a2299-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.614363 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.614372 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtm4x\" (UniqueName: \"kubernetes.io/projected/db6aadcd-20ad-46b9-b394-5173289a2299-kube-api-access-jtm4x\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.614384 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db6aadcd-20ad-46b9-b394-5173289a2299-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.902551 5032 generic.go:334] "Generic (PLEG): container finished" podID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" containerID="0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de" exitCode=0 Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.902635 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.902654 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" event={"ID":"dd075780-5f3f-4eac-a4ee-2ba19f7f596d","Type":"ContainerDied","Data":"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de"} Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.903184 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk" event={"ID":"dd075780-5f3f-4eac-a4ee-2ba19f7f596d","Type":"ContainerDied","Data":"c91a95ecca5fcf852cedfc1964738975ea5022358f61f668e78774b754c341b1"} Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.903234 5032 scope.go:117] "RemoveContainer" containerID="0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.905915 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.905833 5032 generic.go:334] "Generic (PLEG): container finished" podID="db6aadcd-20ad-46b9-b394-5173289a2299" containerID="3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3" exitCode=0 Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.905909 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" event={"ID":"db6aadcd-20ad-46b9-b394-5173289a2299","Type":"ContainerDied","Data":"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3"} Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.906534 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-mmt2r" event={"ID":"db6aadcd-20ad-46b9-b394-5173289a2299","Type":"ContainerDied","Data":"d67ae08fb857cd68fa8dd63d01485216ed25496ec97d623bbc014e4c86c889f8"} Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.933267 5032 scope.go:117] "RemoveContainer" containerID="0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de" Jan 22 16:09:58 crc kubenswrapper[5032]: E0122 16:09:58.934002 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de\": container with ID starting with 0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de not found: ID does not exist" containerID="0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.934044 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de"} err="failed to get container status \"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de\": rpc error: code = NotFound desc = could not find container \"0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de\": container with ID starting with 0337bb3d2a72cfa96b81cd7b13486c314b74932390aaf73745e0f1c48a2656de not found: ID does not exist" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.934068 5032 scope.go:117] "RemoveContainer" containerID="3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3" Jan 
22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.935657 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.945225 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-5h6rk"] Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.948594 5032 scope.go:117] "RemoveContainer" containerID="3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3" Jan 22 16:09:58 crc kubenswrapper[5032]: E0122 16:09:58.949053 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3\": container with ID starting with 3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3 not found: ID does not exist" containerID="3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.949120 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3"} err="failed to get container status \"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3\": rpc error: code = NotFound desc = could not find container \"3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3\": container with ID starting with 3969e82195ca7474ec8ed36d108c8423e65e891787b1a8cc6add62d2366df6c3 not found: ID does not exist" Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.951610 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:58 crc kubenswrapper[5032]: I0122 16:09:58.957886 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-mmt2r"] Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778239 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:09:59 crc kubenswrapper[5032]: E0122 16:09:59.778563 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778583 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 16:09:59 crc kubenswrapper[5032]: E0122 16:09:59.778599 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db6aadcd-20ad-46b9-b394-5173289a2299" containerName="controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778608 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="db6aadcd-20ad-46b9-b394-5173289a2299" containerName="controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: E0122 16:09:59.778622 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" containerName="route-controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778628 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" containerName="route-controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778733 5032 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="db6aadcd-20ad-46b9-b394-5173289a2299" containerName="controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778743 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.778759 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" containerName="route-controller-manager" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.779226 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.781291 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.781477 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.781637 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.781781 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.782076 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.782229 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.783186 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.785110 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.788105 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.788642 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.788680 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.788911 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.789001 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.789019 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.789701 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.793819 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.796216 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.833777 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.833897 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlgq9\" (UniqueName: \"kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.833932 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.833984 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " 
pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.834031 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.834062 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.834132 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2hgc\" (UniqueName: \"kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.834150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.834287 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.935816 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2hgc\" (UniqueName: \"kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.935898 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.935929 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " 
pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.935978 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.936024 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.936048 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlgq9\" (UniqueName: \"kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.936086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.936123 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.936153 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.937104 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.937283 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.937937 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.938032 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.938319 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.943324 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.944809 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.955807 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2hgc\" (UniqueName: \"kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc\") pod \"route-controller-manager-67cc8d88b-zcc5l\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:09:59 crc kubenswrapper[5032]: I0122 16:09:59.956969 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlgq9\" (UniqueName: \"kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9\") pod \"controller-manager-857459b668-qch7z\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.104990 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.111237 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.463775 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db6aadcd-20ad-46b9-b394-5173289a2299" path="/var/lib/kubelet/pods/db6aadcd-20ad-46b9-b394-5173289a2299/volumes" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.465032 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd075780-5f3f-4eac-a4ee-2ba19f7f596d" path="/var/lib/kubelet/pods/dd075780-5f3f-4eac-a4ee-2ba19f7f596d/volumes" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.605468 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:10:00 crc kubenswrapper[5032]: W0122 16:10:00.610281 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4570b56_a88c_4640_9226_f287c8e6e5a7.slice/crio-f417ba887fc672032b2c696c9f9e6ddf53e4c01e27725cce35e819a9738512f4 WatchSource:0}: Error finding container f417ba887fc672032b2c696c9f9e6ddf53e4c01e27725cce35e819a9738512f4: Status 404 returned error can't find the container with id f417ba887fc672032b2c696c9f9e6ddf53e4c01e27725cce35e819a9738512f4 Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.640699 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:10:00 crc kubenswrapper[5032]: W0122 16:10:00.644684 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0883646_a9c2_4be3_8083_d4d5deeec3e7.slice/crio-2241869d11af8f119ec38f32a3e3d0fa69ed785b2b27f777639482736a6e4c22 WatchSource:0}: Error finding container 2241869d11af8f119ec38f32a3e3d0fa69ed785b2b27f777639482736a6e4c22: Status 404 returned error can't find the container with id 2241869d11af8f119ec38f32a3e3d0fa69ed785b2b27f777639482736a6e4c22 Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.929175 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" event={"ID":"f4570b56-a88c-4640-9226-f287c8e6e5a7","Type":"ContainerStarted","Data":"b8db30da7c119c87d5a4069dfd72e8ca28bca3a43cee81b1b9d58ece7b5099a3"} Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.929640 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" event={"ID":"f4570b56-a88c-4640-9226-f287c8e6e5a7","Type":"ContainerStarted","Data":"f417ba887fc672032b2c696c9f9e6ddf53e4c01e27725cce35e819a9738512f4"} Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.931674 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.933326 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" event={"ID":"a0883646-a9c2-4be3-8083-d4d5deeec3e7","Type":"ContainerStarted","Data":"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec"} Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.933360 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" 
event={"ID":"a0883646-a9c2-4be3-8083-d4d5deeec3e7","Type":"ContainerStarted","Data":"2241869d11af8f119ec38f32a3e3d0fa69ed785b2b27f777639482736a6e4c22"} Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.933796 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.943724 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:10:00 crc kubenswrapper[5032]: I0122 16:10:00.961903 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" podStartSLOduration=3.9618767 podStartE2EDuration="3.9618767s" podCreationTimestamp="2026-01-22 16:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:10:00.958999005 +0000 UTC m=+304.771168036" watchObservedRunningTime="2026-01-22 16:10:00.9618767 +0000 UTC m=+304.774045731" Jan 22 16:10:01 crc kubenswrapper[5032]: I0122 16:10:01.036887 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" podStartSLOduration=4.036849301 podStartE2EDuration="4.036849301s" podCreationTimestamp="2026-01-22 16:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:10:01.032118649 +0000 UTC m=+304.844287680" watchObservedRunningTime="2026-01-22 16:10:01.036849301 +0000 UTC m=+304.849018332" Jan 22 16:10:01 crc kubenswrapper[5032]: I0122 16:10:01.281565 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:10:03 crc kubenswrapper[5032]: I0122 16:10:03.449273 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 22 16:10:06 crc kubenswrapper[5032]: I0122 16:10:06.687212 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 22 16:10:15 crc kubenswrapper[5032]: I0122 16:10:15.423142 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.127843 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.128358 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" podUID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" containerName="route-controller-manager" containerID="cri-o://537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec" gracePeriod=30 Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.752078 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.804964 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert\") pod \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.805020 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2hgc\" (UniqueName: \"kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc\") pod \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.805059 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config\") pod \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.805102 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca\") pod \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\" (UID: \"a0883646-a9c2-4be3-8083-d4d5deeec3e7\") " Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.806088 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca" (OuterVolumeSpecName: "client-ca") pod "a0883646-a9c2-4be3-8083-d4d5deeec3e7" (UID: "a0883646-a9c2-4be3-8083-d4d5deeec3e7"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.806806 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config" (OuterVolumeSpecName: "config") pod "a0883646-a9c2-4be3-8083-d4d5deeec3e7" (UID: "a0883646-a9c2-4be3-8083-d4d5deeec3e7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.814064 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a0883646-a9c2-4be3-8083-d4d5deeec3e7" (UID: "a0883646-a9c2-4be3-8083-d4d5deeec3e7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.814750 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc" (OuterVolumeSpecName: "kube-api-access-g2hgc") pod "a0883646-a9c2-4be3-8083-d4d5deeec3e7" (UID: "a0883646-a9c2-4be3-8083-d4d5deeec3e7"). InnerVolumeSpecName "kube-api-access-g2hgc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.906495 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0883646-a9c2-4be3-8083-d4d5deeec3e7-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.906529 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2hgc\" (UniqueName: \"kubernetes.io/projected/a0883646-a9c2-4be3-8083-d4d5deeec3e7-kube-api-access-g2hgc\") on node \"crc\" DevicePath \"\"" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.906542 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:10:18 crc kubenswrapper[5032]: I0122 16:10:18.906551 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0883646-a9c2-4be3-8083-d4d5deeec3e7-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.038064 5032 generic.go:334] "Generic (PLEG): container finished" podID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" containerID="537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec" exitCode=0 Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.038109 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.038132 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" event={"ID":"a0883646-a9c2-4be3-8083-d4d5deeec3e7","Type":"ContainerDied","Data":"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec"} Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.038175 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l" event={"ID":"a0883646-a9c2-4be3-8083-d4d5deeec3e7","Type":"ContainerDied","Data":"2241869d11af8f119ec38f32a3e3d0fa69ed785b2b27f777639482736a6e4c22"} Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.038204 5032 scope.go:117] "RemoveContainer" containerID="537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.066159 5032 scope.go:117] "RemoveContainer" containerID="537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec" Jan 22 16:10:19 crc kubenswrapper[5032]: E0122 16:10:19.066740 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec\": container with ID starting with 537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec not found: ID does not exist" containerID="537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.066790 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec"} err="failed to get container status \"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec\": rpc error: code = NotFound desc = could not find container 
\"537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec\": container with ID starting with 537be94572828cd3f5f5703e8b41302a707bf61644c2f9e494e40edfb81d63ec not found: ID does not exist" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.077330 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.083517 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-zcc5l"] Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.794993 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb"] Jan 22 16:10:19 crc kubenswrapper[5032]: E0122 16:10:19.795691 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" containerName="route-controller-manager" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.795715 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" containerName="route-controller-manager" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.795989 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" containerName="route-controller-manager" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.796649 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.801064 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.801189 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.801280 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.801631 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.801958 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.802805 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.812057 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb"] Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.819274 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-config\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.819377 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-589q2\" (UniqueName: \"kubernetes.io/projected/ee745457-3862-47a9-ba49-41030c52d1dd-kube-api-access-589q2\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.819469 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee745457-3862-47a9-ba49-41030c52d1dd-serving-cert\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.819625 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-client-ca\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.921015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-client-ca\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.921102 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-config\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.921151 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-589q2\" (UniqueName: \"kubernetes.io/projected/ee745457-3862-47a9-ba49-41030c52d1dd-kube-api-access-589q2\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.921732 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee745457-3862-47a9-ba49-41030c52d1dd-serving-cert\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.922635 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-client-ca\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.923399 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee745457-3862-47a9-ba49-41030c52d1dd-config\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.938161 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee745457-3862-47a9-ba49-41030c52d1dd-serving-cert\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:19 crc kubenswrapper[5032]: I0122 16:10:19.957030 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-589q2\" (UniqueName: \"kubernetes.io/projected/ee745457-3862-47a9-ba49-41030c52d1dd-kube-api-access-589q2\") pod \"route-controller-manager-54599f87c8-xd9xb\" (UID: \"ee745457-3862-47a9-ba49-41030c52d1dd\") " pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:20 crc kubenswrapper[5032]: I0122 16:10:20.130588 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:20 crc kubenswrapper[5032]: I0122 16:10:20.463900 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0883646-a9c2-4be3-8083-d4d5deeec3e7" path="/var/lib/kubelet/pods/a0883646-a9c2-4be3-8083-d4d5deeec3e7/volumes" Jan 22 16:10:20 crc kubenswrapper[5032]: I0122 16:10:20.627639 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb"] Jan 22 16:10:21 crc kubenswrapper[5032]: I0122 16:10:21.058174 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" event={"ID":"ee745457-3862-47a9-ba49-41030c52d1dd","Type":"ContainerStarted","Data":"e92bf3ea841ef8b0c3f0f9edb20f1dff963bf7cb7bb9d5b545d5766b8ecf3bff"} Jan 22 16:10:21 crc kubenswrapper[5032]: I0122 16:10:21.060682 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" event={"ID":"ee745457-3862-47a9-ba49-41030c52d1dd","Type":"ContainerStarted","Data":"10610ef8618c6c18800f44781cd514de0008e7bf862c948efb090c9e4a9499b0"} Jan 22 16:10:21 crc kubenswrapper[5032]: I0122 16:10:21.060893 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:21 crc kubenswrapper[5032]: I0122 16:10:21.081932 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" podStartSLOduration=3.081911186 podStartE2EDuration="3.081911186s" podCreationTimestamp="2026-01-22 16:10:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:10:21.079175235 +0000 UTC m=+324.891344336" watchObservedRunningTime="2026-01-22 16:10:21.081911186 +0000 UTC m=+324.894080257" Jan 22 16:10:21 crc kubenswrapper[5032]: I0122 16:10:21.178117 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-54599f87c8-xd9xb" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.592917 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qz6dh"] Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.594433 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.617521 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qz6dh"] Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729188 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729246 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-certificates\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729295 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729318 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-tls\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729351 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729555 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2jng\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-kube-api-access-l2jng\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729576 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-trusted-ca\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.729595 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-bound-sa-token\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.752701 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831647 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-trusted-ca\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831715 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-bound-sa-token\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831741 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2jng\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-kube-api-access-l2jng\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831780 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831810 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-certificates\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831877 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-tls\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.831945 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.832956 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.833772 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-certificates\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.834439 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-trusted-ca\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.842245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.846943 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-registry-tls\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.858301 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2jng\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-kube-api-access-l2jng\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.860981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1-bound-sa-token\") pod \"image-registry-66df7c8f76-qz6dh\" (UID: \"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1\") " pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:48 crc kubenswrapper[5032]: I0122 16:10:48.913251 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:49 crc kubenswrapper[5032]: I0122 16:10:49.419044 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qz6dh"] Jan 22 16:10:50 crc kubenswrapper[5032]: I0122 16:10:50.261363 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" event={"ID":"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1","Type":"ContainerStarted","Data":"48220790764e85a29e7b89d7734027abf059d7bd5606c834bbbac10d8a94bd04"} Jan 22 16:10:50 crc kubenswrapper[5032]: I0122 16:10:50.261741 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:10:50 crc kubenswrapper[5032]: I0122 16:10:50.261755 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" event={"ID":"eb7b9aac-dc8b-4382-8b9a-0c2bfdfdd7d1","Type":"ContainerStarted","Data":"7beb92b1cb338b8740e5618df232ebc8ea66af56a53633f99e04e2d4b96cef1f"} Jan 22 16:10:50 crc kubenswrapper[5032]: I0122 16:10:50.308932 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" podStartSLOduration=2.30890885 podStartE2EDuration="2.30890885s" podCreationTimestamp="2026-01-22 16:10:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:10:50.308465637 +0000 UTC m=+354.120634698" watchObservedRunningTime="2026-01-22 16:10:50.30890885 +0000 UTC m=+354.121077921" Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.016775 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.017485 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.783922 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.784385 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zj287" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="registry-server" containerID="cri-o://74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" gracePeriod=30 Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.792026 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.792288 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fx8tt" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="registry-server" containerID="cri-o://925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb" gracePeriod=30 Jan 
22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.802665 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.803013 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" containerID="cri-o://b01b798396f66cfad5da65cdf44e810b8566a6178ca8b02c0cf847be47295420" gracePeriod=30 Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.809591 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.809819 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h6xjw" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="registry-server" containerID="cri-o://44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0" gracePeriod=30 Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.816788 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.817267 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wlqfh" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="registry-server" containerID="cri-o://52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2" gracePeriod=30 Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.824046 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qfphp"] Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.825017 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:03 crc kubenswrapper[5032]: I0122 16:11:03.833158 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qfphp"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.019627 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlxh9\" (UniqueName: \"kubernetes.io/projected/611edd22-6336-4d33-9dbd-fcceb3246fb0-kube-api-access-dlxh9\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.019908 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.019942 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.120948 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.121004 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.121060 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlxh9\" (UniqueName: \"kubernetes.io/projected/611edd22-6336-4d33-9dbd-fcceb3246fb0-kube-api-access-dlxh9\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.122070 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.136290 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created 
or running: checking if PID of 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 is running failed: container process not found" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" cmd=["grpc_health_probe","-addr=:50051"] Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.136675 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 is running failed: container process not found" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" cmd=["grpc_health_probe","-addr=:50051"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.137592 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/611edd22-6336-4d33-9dbd-fcceb3246fb0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.140441 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 is running failed: container process not found" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" cmd=["grpc_health_probe","-addr=:50051"] Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.140480 5032 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-zj287" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="registry-server" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.142179 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlxh9\" (UniqueName: \"kubernetes.io/projected/611edd22-6336-4d33-9dbd-fcceb3246fb0-kube-api-access-dlxh9\") pod \"marketplace-operator-79b997595-qfphp\" (UID: \"611edd22-6336-4d33-9dbd-fcceb3246fb0\") " pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.152330 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.296899 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.324256 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.325217 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.332996 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlb6k\" (UniqueName: \"kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k\") pod \"d11bfc17-4e06-403a-8632-87b168d498cf\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.333112 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6m26\" (UniqueName: \"kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26\") pod \"2a92ee08-99e1-4a48-80e5-5158039c8f75\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.333137 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhfpt\" (UniqueName: \"kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt\") pod \"fe241ed5-4a42-4178-8756-513449190b6b\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.333163 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content\") pod \"2a92ee08-99e1-4a48-80e5-5158039c8f75\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.333212 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities\") pod \"fe241ed5-4a42-4178-8756-513449190b6b\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.337902 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content\") pod \"d11bfc17-4e06-403a-8632-87b168d498cf\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.337971 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities\") pod \"d11bfc17-4e06-403a-8632-87b168d498cf\" (UID: \"d11bfc17-4e06-403a-8632-87b168d498cf\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.338003 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content\") pod \"fe241ed5-4a42-4178-8756-513449190b6b\" (UID: \"fe241ed5-4a42-4178-8756-513449190b6b\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.338080 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities\") pod \"2a92ee08-99e1-4a48-80e5-5158039c8f75\" (UID: \"2a92ee08-99e1-4a48-80e5-5158039c8f75\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.342804 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities" (OuterVolumeSpecName: "utilities") pod 
"2a92ee08-99e1-4a48-80e5-5158039c8f75" (UID: "2a92ee08-99e1-4a48-80e5-5158039c8f75"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.343714 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities" (OuterVolumeSpecName: "utilities") pod "d11bfc17-4e06-403a-8632-87b168d498cf" (UID: "d11bfc17-4e06-403a-8632-87b168d498cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.346224 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26" (OuterVolumeSpecName: "kube-api-access-g6m26") pod "2a92ee08-99e1-4a48-80e5-5158039c8f75" (UID: "2a92ee08-99e1-4a48-80e5-5158039c8f75"). InnerVolumeSpecName "kube-api-access-g6m26". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.349048 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities" (OuterVolumeSpecName: "utilities") pod "fe241ed5-4a42-4178-8756-513449190b6b" (UID: "fe241ed5-4a42-4178-8756-513449190b6b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.350557 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.351114 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt" (OuterVolumeSpecName: "kube-api-access-lhfpt") pod "fe241ed5-4a42-4178-8756-513449190b6b" (UID: "fe241ed5-4a42-4178-8756-513449190b6b"). InnerVolumeSpecName "kube-api-access-lhfpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.351226 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k" (OuterVolumeSpecName: "kube-api-access-rlb6k") pod "d11bfc17-4e06-403a-8632-87b168d498cf" (UID: "d11bfc17-4e06-403a-8632-87b168d498cf"). InnerVolumeSpecName "kube-api-access-rlb6k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358874 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358905 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358916 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358927 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlb6k\" (UniqueName: \"kubernetes.io/projected/d11bfc17-4e06-403a-8632-87b168d498cf-kube-api-access-rlb6k\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358941 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6m26\" (UniqueName: \"kubernetes.io/projected/2a92ee08-99e1-4a48-80e5-5158039c8f75-kube-api-access-g6m26\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.358950 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhfpt\" (UniqueName: \"kubernetes.io/projected/fe241ed5-4a42-4178-8756-513449190b6b-kube-api-access-lhfpt\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.362414 5032 generic.go:334] "Generic (PLEG): container finished" podID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" exitCode=0 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.362463 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerDied","Data":"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.362491 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj287" event={"ID":"2a92ee08-99e1-4a48-80e5-5158039c8f75","Type":"ContainerDied","Data":"08409f0652c3c38ae55288ff5a0077362401bac7f76073f9070c485373423735"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.362508 5032 scope.go:117] "RemoveContainer" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.362622 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zj287" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.367760 5032 generic.go:334] "Generic (PLEG): container finished" podID="fe241ed5-4a42-4178-8756-513449190b6b" containerID="925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb" exitCode=0 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.367977 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fx8tt" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.368170 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerDied","Data":"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.368217 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fx8tt" event={"ID":"fe241ed5-4a42-4178-8756-513449190b6b","Type":"ContainerDied","Data":"795f383d6289ff4152bfb7dacd3d57c0dbc9c43f770b81e83ef064fa0113799d"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.369583 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.370056 5032 generic.go:334] "Generic (PLEG): container finished" podID="d11bfc17-4e06-403a-8632-87b168d498cf" containerID="52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2" exitCode=0 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.370122 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerDied","Data":"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.370146 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wlqfh" event={"ID":"d11bfc17-4e06-403a-8632-87b168d498cf","Type":"ContainerDied","Data":"a038c7625ea45ee7d161afc50da23f8fb8023c16b28162bf208e26bd725e75b0"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.370223 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wlqfh" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.372037 5032 generic.go:334] "Generic (PLEG): container finished" podID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerID="44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0" exitCode=0 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.372081 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerDied","Data":"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.372097 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6xjw" event={"ID":"934b652c-ef02-4e1b-8043-1d26ea9d2c56","Type":"ContainerDied","Data":"e5e3f3ac4acea5c31b3c0f45e0bdb6657b7b88e1d528fa25c7ac6f9f7e009bdb"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.372148 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6xjw" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.373348 5032 generic.go:334] "Generic (PLEG): container finished" podID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerID="b01b798396f66cfad5da65cdf44e810b8566a6178ca8b02c0cf847be47295420" exitCode=0 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.373372 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" event={"ID":"fb8a5762-1a9d-4011-a049-592cd5f33244","Type":"ContainerDied","Data":"b01b798396f66cfad5da65cdf44e810b8566a6178ca8b02c0cf847be47295420"} Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.373410 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rsck8" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.397221 5032 scope.go:117] "RemoveContainer" containerID="38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.420316 5032 scope.go:117] "RemoveContainer" containerID="cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.432189 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a92ee08-99e1-4a48-80e5-5158039c8f75" (UID: "2a92ee08-99e1-4a48-80e5-5158039c8f75"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.435170 5032 scope.go:117] "RemoveContainer" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.435551 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225\": container with ID starting with 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 not found: ID does not exist" containerID="74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.435672 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225"} err="failed to get container status \"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225\": rpc error: code = NotFound desc = could not find container \"74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225\": container with ID starting with 74da045c7a70cd5888f2fdafbe9276c50dfa9609806bc5a3b4ebfad4057c0225 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.436498 5032 scope.go:117] "RemoveContainer" containerID="38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.436887 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6\": container with ID starting with 38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6 not found: ID does not exist" 
containerID="38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.436910 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6"} err="failed to get container status \"38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6\": rpc error: code = NotFound desc = could not find container \"38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6\": container with ID starting with 38856bbfff048b514d3fdd1fef694f953404c67152efeec1a38b2497fd5371f6 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.436924 5032 scope.go:117] "RemoveContainer" containerID="cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.437158 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09\": container with ID starting with cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09 not found: ID does not exist" containerID="cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.437178 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09"} err="failed to get container status \"cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09\": rpc error: code = NotFound desc = could not find container \"cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09\": container with ID starting with cb016f128ce21dd724d4e70b7f26238a737925a23b46fd8969d4705054d87d09 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.437192 5032 scope.go:117] "RemoveContainer" containerID="925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.443691 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe241ed5-4a42-4178-8756-513449190b6b" (UID: "fe241ed5-4a42-4178-8756-513449190b6b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.453840 5032 scope.go:117] "RemoveContainer" containerID="9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459543 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics\") pod \"fb8a5762-1a9d-4011-a049-592cd5f33244\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459662 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njpkr\" (UniqueName: \"kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr\") pod \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459692 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca\") pod \"fb8a5762-1a9d-4011-a049-592cd5f33244\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459722 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities\") pod \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459741 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t6xb\" (UniqueName: \"kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb\") pod \"fb8a5762-1a9d-4011-a049-592cd5f33244\" (UID: \"fb8a5762-1a9d-4011-a049-592cd5f33244\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.459824 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content\") pod \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\" (UID: \"934b652c-ef02-4e1b-8043-1d26ea9d2c56\") " Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.460031 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a92ee08-99e1-4a48-80e5-5158039c8f75-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.460042 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe241ed5-4a42-4178-8756-513449190b6b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.460943 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities" (OuterVolumeSpecName: "utilities") pod "934b652c-ef02-4e1b-8043-1d26ea9d2c56" (UID: "934b652c-ef02-4e1b-8043-1d26ea9d2c56"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.462090 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "fb8a5762-1a9d-4011-a049-592cd5f33244" (UID: "fb8a5762-1a9d-4011-a049-592cd5f33244"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.464563 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "fb8a5762-1a9d-4011-a049-592cd5f33244" (UID: "fb8a5762-1a9d-4011-a049-592cd5f33244"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.465142 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb" (OuterVolumeSpecName: "kube-api-access-6t6xb") pod "fb8a5762-1a9d-4011-a049-592cd5f33244" (UID: "fb8a5762-1a9d-4011-a049-592cd5f33244"). InnerVolumeSpecName "kube-api-access-6t6xb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.466141 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr" (OuterVolumeSpecName: "kube-api-access-njpkr") pod "934b652c-ef02-4e1b-8043-1d26ea9d2c56" (UID: "934b652c-ef02-4e1b-8043-1d26ea9d2c56"). InnerVolumeSpecName "kube-api-access-njpkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.474666 5032 scope.go:117] "RemoveContainer" containerID="28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.503161 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "934b652c-ef02-4e1b-8043-1d26ea9d2c56" (UID: "934b652c-ef02-4e1b-8043-1d26ea9d2c56"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.517669 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d11bfc17-4e06-403a-8632-87b168d498cf" (UID: "d11bfc17-4e06-403a-8632-87b168d498cf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.535515 5032 scope.go:117] "RemoveContainer" containerID="925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.536292 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb\": container with ID starting with 925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb not found: ID does not exist" containerID="925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.536394 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb"} err="failed to get container status \"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb\": rpc error: code = NotFound desc = could not find container \"925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb\": container with ID starting with 925c549864652a4061f59e0baac2566171f38d780edbf15ab25b87b5430bc2bb not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.536474 5032 scope.go:117] "RemoveContainer" containerID="9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.536914 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6\": container with ID starting with 9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6 not found: ID does not exist" containerID="9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.536956 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6"} err="failed to get container status \"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6\": rpc error: code = NotFound desc = could not find container \"9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6\": container with ID starting with 9c5c9d5bef6d9ad492404f7280d2c6fcfd4cce28423f2da05ff804763a73dbc6 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.536971 5032 scope.go:117] "RemoveContainer" containerID="28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.537509 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea\": container with ID starting with 28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea not found: ID does not exist" containerID="28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.537625 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea"} err="failed to get container status \"28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea\": rpc error: code = NotFound desc = could not 
find container \"28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea\": container with ID starting with 28bc9f8a83c2f1a2d11605caa2b0eea984a6d11334e90052fe4ddcd762862fea not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.537705 5032 scope.go:117] "RemoveContainer" containerID="52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.551011 5032 scope.go:117] "RemoveContainer" containerID="3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.561275 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.561308 5032 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.561319 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11bfc17-4e06-403a-8632-87b168d498cf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.561447 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njpkr\" (UniqueName: \"kubernetes.io/projected/934b652c-ef02-4e1b-8043-1d26ea9d2c56-kube-api-access-njpkr\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.562167 5032 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8a5762-1a9d-4011-a049-592cd5f33244-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.562186 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/934b652c-ef02-4e1b-8043-1d26ea9d2c56-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.562198 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t6xb\" (UniqueName: \"kubernetes.io/projected/fb8a5762-1a9d-4011-a049-592cd5f33244-kube-api-access-6t6xb\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.573585 5032 scope.go:117] "RemoveContainer" containerID="de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.593108 5032 scope.go:117] "RemoveContainer" containerID="52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.593631 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2\": container with ID starting with 52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2 not found: ID does not exist" containerID="52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.593672 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2"} err="failed to get container status \"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2\": rpc error: code = NotFound desc = could not find container \"52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2\": container with ID starting with 52ec678d4aff2cfaa278932d38f8d7bcc288025874734a550cdeba9445115ea2 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.593703 5032 scope.go:117] "RemoveContainer" containerID="3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.594114 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152\": container with ID starting with 3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152 not found: ID does not exist" containerID="3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.594146 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152"} err="failed to get container status \"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152\": rpc error: code = NotFound desc = could not find container \"3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152\": container with ID starting with 3c131cef0b26ee1543dda583de85839778bb348503299d5191b4d1dc6fd2d152 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.594161 5032 scope.go:117] "RemoveContainer" containerID="de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.594561 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04\": container with ID starting with de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04 not found: ID does not exist" containerID="de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.594615 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04"} err="failed to get container status \"de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04\": rpc error: code = NotFound desc = could not find container \"de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04\": container with ID starting with de6115c2b20d78bcc673a04a53ff32bbb65b089d86813a3f2ada577492855a04 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.594654 5032 scope.go:117] "RemoveContainer" containerID="44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.608407 5032 scope.go:117] "RemoveContainer" containerID="16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.637637 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qfphp"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.640962 5032 
scope.go:117] "RemoveContainer" containerID="59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3" Jan 22 16:11:04 crc kubenswrapper[5032]: W0122 16:11:04.644458 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod611edd22_6336_4d33_9dbd_fcceb3246fb0.slice/crio-e15f48295c3981b681a75825e85defee87b572665291a847777322777af565f4 WatchSource:0}: Error finding container e15f48295c3981b681a75825e85defee87b572665291a847777322777af565f4: Status 404 returned error can't find the container with id e15f48295c3981b681a75825e85defee87b572665291a847777322777af565f4 Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.655935 5032 scope.go:117] "RemoveContainer" containerID="44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.656324 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0\": container with ID starting with 44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0 not found: ID does not exist" containerID="44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.656354 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0"} err="failed to get container status \"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0\": rpc error: code = NotFound desc = could not find container \"44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0\": container with ID starting with 44fc11bb126e82125f429456606ebcc45df5895176ed34bd21d62cf8ccc6fee0 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.656375 5032 scope.go:117] "RemoveContainer" containerID="16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.656715 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57\": container with ID starting with 16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57 not found: ID does not exist" containerID="16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.656736 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57"} err="failed to get container status \"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57\": rpc error: code = NotFound desc = could not find container \"16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57\": container with ID starting with 16a88f7068051e55916df4e10e51edf2d6ee23faf49e980cfd407ff018931c57 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.656750 5032 scope.go:117] "RemoveContainer" containerID="59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3" Jan 22 16:11:04 crc kubenswrapper[5032]: E0122 16:11:04.657226 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3\": container with ID starting with 59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3 not found: ID does not exist" containerID="59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.657251 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3"} err="failed to get container status \"59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3\": rpc error: code = NotFound desc = could not find container \"59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3\": container with ID starting with 59c15852363889237594857a8a4ca9b2d9cae7d8bbcd5598fb82eb664b2420a3 not found: ID does not exist" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.657265 5032 scope.go:117] "RemoveContainer" containerID="b01b798396f66cfad5da65cdf44e810b8566a6178ca8b02c0cf847be47295420" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.679527 5032 scope.go:117] "RemoveContainer" containerID="af89b2f3a07b52d76cd04d87ad6aacb69443be78e0b12b6c5bb73fbddf14ab79" Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.684953 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.691932 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zj287"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.700129 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.707562 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fx8tt"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.715016 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.718014 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rsck8"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.733374 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.735631 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wlqfh"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.742117 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:11:04 crc kubenswrapper[5032]: I0122 16:11:04.745170 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6xjw"] Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.387720 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" event={"ID":"611edd22-6336-4d33-9dbd-fcceb3246fb0","Type":"ContainerStarted","Data":"a4aaefbaafab85a366d6a6e9473f474798caf80c1d249c46a08015a98b1ecd9b"} Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.388320 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" 
event={"ID":"611edd22-6336-4d33-9dbd-fcceb3246fb0","Type":"ContainerStarted","Data":"e15f48295c3981b681a75825e85defee87b572665291a847777322777af565f4"} Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.388343 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.393652 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.406731 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qfphp" podStartSLOduration=2.406714108 podStartE2EDuration="2.406714108s" podCreationTimestamp="2026-01-22 16:11:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:11:05.401806223 +0000 UTC m=+369.213975264" watchObservedRunningTime="2026-01-22 16:11:05.406714108 +0000 UTC m=+369.218883149" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.580988 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pbjpb"] Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581253 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581271 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581284 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581294 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581303 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581312 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581326 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581334 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581343 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581351 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581361 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="extract-content" Jan 22 16:11:05 crc 
kubenswrapper[5032]: I0122 16:11:05.581369 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581381 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581389 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581401 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581408 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581422 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581429 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581442 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581451 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581460 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581467 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581478 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581485 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="extract-utilities" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581496 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581504 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: E0122 16:11:05.581511 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581519 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="extract-content" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581624 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" 
containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581635 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581645 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581654 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" containerName="marketplace-operator" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581663 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe241ed5-4a42-4178-8756-513449190b6b" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.581675 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" containerName="registry-server" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.582521 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.587772 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.589218 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbjpb"] Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.676815 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-catalog-content\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.676911 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-utilities\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.676959 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lnth\" (UniqueName: \"kubernetes.io/projected/1daf631c-f05c-4a21-acf1-1bf108a35cc9-kube-api-access-8lnth\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.778375 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-catalog-content\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.778442 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-utilities\") pod \"redhat-marketplace-pbjpb\" (UID: 
\"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.778498 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lnth\" (UniqueName: \"kubernetes.io/projected/1daf631c-f05c-4a21-acf1-1bf108a35cc9-kube-api-access-8lnth\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.780436 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-catalog-content\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.780539 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1daf631c-f05c-4a21-acf1-1bf108a35cc9-utilities\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.798086 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lnth\" (UniqueName: \"kubernetes.io/projected/1daf631c-f05c-4a21-acf1-1bf108a35cc9-kube-api-access-8lnth\") pod \"redhat-marketplace-pbjpb\" (UID: \"1daf631c-f05c-4a21-acf1-1bf108a35cc9\") " pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:05 crc kubenswrapper[5032]: I0122 16:11:05.912279 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.180415 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.182188 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.191457 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.196833 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.285616 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg6s8\" (UniqueName: \"kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.285943 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.286055 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.387924 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.388002 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.388047 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg6s8\" (UniqueName: \"kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.388565 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.388719 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content\") pod \"certified-operators-5nnrt\" (UID: 
\"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: W0122 16:11:06.419230 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1daf631c_f05c_4a21_acf1_1bf108a35cc9.slice/crio-5211679beaf9d90e546f9dbcf5030c8e3097638b1f02dc753364ede73c2bed5e WatchSource:0}: Error finding container 5211679beaf9d90e546f9dbcf5030c8e3097638b1f02dc753364ede73c2bed5e: Status 404 returned error can't find the container with id 5211679beaf9d90e546f9dbcf5030c8e3097638b1f02dc753364ede73c2bed5e Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.419649 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbjpb"] Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.437211 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg6s8\" (UniqueName: \"kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8\") pod \"certified-operators-5nnrt\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.466717 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a92ee08-99e1-4a48-80e5-5158039c8f75" path="/var/lib/kubelet/pods/2a92ee08-99e1-4a48-80e5-5158039c8f75/volumes" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.467676 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="934b652c-ef02-4e1b-8043-1d26ea9d2c56" path="/var/lib/kubelet/pods/934b652c-ef02-4e1b-8043-1d26ea9d2c56/volumes" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.468265 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d11bfc17-4e06-403a-8632-87b168d498cf" path="/var/lib/kubelet/pods/d11bfc17-4e06-403a-8632-87b168d498cf/volumes" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.469460 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb8a5762-1a9d-4011-a049-592cd5f33244" path="/var/lib/kubelet/pods/fb8a5762-1a9d-4011-a049-592cd5f33244/volumes" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.470554 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe241ed5-4a42-4178-8756-513449190b6b" path="/var/lib/kubelet/pods/fe241ed5-4a42-4178-8756-513449190b6b/volumes" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.506971 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:06 crc kubenswrapper[5032]: I0122 16:11:06.773134 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.406023 5032 generic.go:334] "Generic (PLEG): container finished" podID="1daf631c-f05c-4a21-acf1-1bf108a35cc9" containerID="d44aeb2c33cd31b41afca118be39e63a1c1fd314ddd93be7224d224c1623994e" exitCode=0 Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.406088 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbjpb" event={"ID":"1daf631c-f05c-4a21-acf1-1bf108a35cc9","Type":"ContainerDied","Data":"d44aeb2c33cd31b41afca118be39e63a1c1fd314ddd93be7224d224c1623994e"} Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.406148 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbjpb" event={"ID":"1daf631c-f05c-4a21-acf1-1bf108a35cc9","Type":"ContainerStarted","Data":"5211679beaf9d90e546f9dbcf5030c8e3097638b1f02dc753364ede73c2bed5e"} Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.408031 5032 generic.go:334] "Generic (PLEG): container finished" podID="66a6f462-61b5-4b57-af6f-858294b300eb" containerID="67e5da849d5ddc34d211ac57c737dcd64149d22b2dda6db11c6e79fd278456f5" exitCode=0 Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.408082 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerDied","Data":"67e5da849d5ddc34d211ac57c737dcd64149d22b2dda6db11c6e79fd278456f5"} Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.408112 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerStarted","Data":"772d48f96674b78cdc4118147c557f738a1fbabbc217254b0f1410037aef206b"} Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.990164 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.996405 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:07 crc kubenswrapper[5032]: I0122 16:11:07.997692 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.007669 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.014027 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.014208 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.014306 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-288wv\" (UniqueName: \"kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.115185 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-288wv\" (UniqueName: \"kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.115255 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.115301 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.115681 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.117308 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " 
pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.135600 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-288wv\" (UniqueName: \"kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv\") pod \"redhat-operators-wqp6k\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.337558 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.425734 5032 generic.go:334] "Generic (PLEG): container finished" podID="1daf631c-f05c-4a21-acf1-1bf108a35cc9" containerID="2d694ac5fa195baff6e20711de704d7880c34c728f875e5a8fe0bac359f0e77a" exitCode=0 Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.426118 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbjpb" event={"ID":"1daf631c-f05c-4a21-acf1-1bf108a35cc9","Type":"ContainerDied","Data":"2d694ac5fa195baff6e20711de704d7880c34c728f875e5a8fe0bac359f0e77a"} Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.433520 5032 generic.go:334] "Generic (PLEG): container finished" podID="66a6f462-61b5-4b57-af6f-858294b300eb" containerID="bc2bb9eb1245fcacca712126b2f2c47b322a38b3c49c0cdde66c6f7cd3eab203" exitCode=0 Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.433682 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerDied","Data":"bc2bb9eb1245fcacca712126b2f2c47b322a38b3c49c0cdde66c6f7cd3eab203"} Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.579577 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.580509 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.583091 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.594918 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.643215 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.643720 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56smc\" (UniqueName: \"kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.643774 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.748848 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.748942 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56smc\" (UniqueName: \"kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.748982 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.749592 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.749635 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities\") pod \"community-operators-7st44\" (UID: 
\"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.758430 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:11:08 crc kubenswrapper[5032]: W0122 16:11:08.764193 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5b5d9ec_ecdf_4afe_b75d_2b29f32e0385.slice/crio-6ef17ab82e64564e0abab261d587eeab3903e57cb408578c77b5234564ae7230 WatchSource:0}: Error finding container 6ef17ab82e64564e0abab261d587eeab3903e57cb408578c77b5234564ae7230: Status 404 returned error can't find the container with id 6ef17ab82e64564e0abab261d587eeab3903e57cb408578c77b5234564ae7230 Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.773639 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56smc\" (UniqueName: \"kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc\") pod \"community-operators-7st44\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.902584 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.919905 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-qz6dh" Jan 22 16:11:08 crc kubenswrapper[5032]: I0122 16:11:08.976239 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.174661 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.439963 5032 generic.go:334] "Generic (PLEG): container finished" podID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerID="b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759" exitCode=0 Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.440298 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerDied","Data":"b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759"} Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.440480 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerStarted","Data":"6ef17ab82e64564e0abab261d587eeab3903e57cb408578c77b5234564ae7230"} Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.442181 5032 generic.go:334] "Generic (PLEG): container finished" podID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerID="18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71" exitCode=0 Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.442258 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerDied","Data":"18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71"} Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.442279 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerStarted","Data":"819a438297d3e18675c8ea80b7b562c786956a3b07bdae09931a6a616c5452ee"} Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.454285 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerStarted","Data":"e376001bc52b3cd612659e9d36a330c208382a446394ea4c8f1df4fa7a1d61a9"} Jan 22 16:11:09 crc kubenswrapper[5032]: I0122 16:11:09.511183 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5nnrt" podStartSLOduration=2.106250137 podStartE2EDuration="3.511164208s" podCreationTimestamp="2026-01-22 16:11:06 +0000 UTC" firstStartedPulling="2026-01-22 16:11:07.410248448 +0000 UTC m=+371.222417499" lastFinishedPulling="2026-01-22 16:11:08.815162539 +0000 UTC m=+372.627331570" observedRunningTime="2026-01-22 16:11:09.50988007 +0000 UTC m=+373.322049101" watchObservedRunningTime="2026-01-22 16:11:09.511164208 +0000 UTC m=+373.323333239" Jan 22 16:11:10 crc kubenswrapper[5032]: I0122 16:11:10.464251 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerStarted","Data":"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286"} Jan 22 16:11:10 crc kubenswrapper[5032]: I0122 16:11:10.467519 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbjpb" event={"ID":"1daf631c-f05c-4a21-acf1-1bf108a35cc9","Type":"ContainerStarted","Data":"a3f5c281b2af9f69bc944c4df30926c8feeb203fb21fb9183ee344ccbb1f5afe"} Jan 22 16:11:10 crc kubenswrapper[5032]: I0122 16:11:10.475120 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerStarted","Data":"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f"} Jan 22 16:11:10 crc kubenswrapper[5032]: I0122 16:11:10.539487 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pbjpb" podStartSLOduration=3.578336487 podStartE2EDuration="5.539469986s" podCreationTimestamp="2026-01-22 16:11:05 +0000 UTC" firstStartedPulling="2026-01-22 16:11:07.407699022 +0000 UTC m=+371.219868053" lastFinishedPulling="2026-01-22 16:11:09.368832521 +0000 UTC m=+373.181001552" observedRunningTime="2026-01-22 16:11:10.537631051 +0000 UTC m=+374.349800082" watchObservedRunningTime="2026-01-22 16:11:10.539469986 +0000 UTC m=+374.351639017" Jan 22 16:11:11 crc kubenswrapper[5032]: I0122 16:11:11.484536 5032 generic.go:334] "Generic (PLEG): container finished" podID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerID="ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286" exitCode=0 Jan 22 16:11:11 crc kubenswrapper[5032]: I0122 16:11:11.485030 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerDied","Data":"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286"} Jan 22 16:11:11 crc kubenswrapper[5032]: I0122 16:11:11.489251 5032 generic.go:334] "Generic (PLEG): container finished" podID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" 
containerID="5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f" exitCode=0 Jan 22 16:11:11 crc kubenswrapper[5032]: I0122 16:11:11.491721 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerDied","Data":"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f"} Jan 22 16:11:12 crc kubenswrapper[5032]: I0122 16:11:12.496290 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerStarted","Data":"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635"} Jan 22 16:11:12 crc kubenswrapper[5032]: I0122 16:11:12.500208 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerStarted","Data":"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d"} Jan 22 16:11:12 crc kubenswrapper[5032]: I0122 16:11:12.516758 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7st44" podStartSLOduration=1.8038290300000002 podStartE2EDuration="4.516736424s" podCreationTimestamp="2026-01-22 16:11:08 +0000 UTC" firstStartedPulling="2026-01-22 16:11:09.444424386 +0000 UTC m=+373.256593417" lastFinishedPulling="2026-01-22 16:11:12.15733177 +0000 UTC m=+375.969500811" observedRunningTime="2026-01-22 16:11:12.513826047 +0000 UTC m=+376.325995078" watchObservedRunningTime="2026-01-22 16:11:12.516736424 +0000 UTC m=+376.328905465" Jan 22 16:11:15 crc kubenswrapper[5032]: I0122 16:11:15.912422 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:15 crc kubenswrapper[5032]: I0122 16:11:15.912995 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:15 crc kubenswrapper[5032]: I0122 16:11:15.969597 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:15 crc kubenswrapper[5032]: I0122 16:11:15.986580 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wqp6k" podStartSLOduration=6.327042158 podStartE2EDuration="8.986561827s" podCreationTimestamp="2026-01-22 16:11:07 +0000 UTC" firstStartedPulling="2026-01-22 16:11:09.445768046 +0000 UTC m=+373.257937067" lastFinishedPulling="2026-01-22 16:11:12.105287665 +0000 UTC m=+375.917456736" observedRunningTime="2026-01-22 16:11:12.531307737 +0000 UTC m=+376.343476768" watchObservedRunningTime="2026-01-22 16:11:15.986561827 +0000 UTC m=+379.798730858" Jan 22 16:11:16 crc kubenswrapper[5032]: I0122 16:11:16.507750 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:16 crc kubenswrapper[5032]: I0122 16:11:16.508200 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:16 crc kubenswrapper[5032]: I0122 16:11:16.563758 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:16 crc kubenswrapper[5032]: I0122 16:11:16.610149 5032 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pbjpb" Jan 22 16:11:16 crc kubenswrapper[5032]: I0122 16:11:16.629027 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:11:17 crc kubenswrapper[5032]: I0122 16:11:17.802147 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:11:17 crc kubenswrapper[5032]: I0122 16:11:17.802574 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" podUID="f4570b56-a88c-4640-9226-f287c8e6e5a7" containerName="controller-manager" containerID="cri-o://b8db30da7c119c87d5a4069dfd72e8ca28bca3a43cee81b1b9d58ece7b5099a3" gracePeriod=30 Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.338003 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.338045 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.374161 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.554815 5032 generic.go:334] "Generic (PLEG): container finished" podID="f4570b56-a88c-4640-9226-f287c8e6e5a7" containerID="b8db30da7c119c87d5a4069dfd72e8ca28bca3a43cee81b1b9d58ece7b5099a3" exitCode=0 Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.554904 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" event={"ID":"f4570b56-a88c-4640-9226-f287c8e6e5a7","Type":"ContainerDied","Data":"b8db30da7c119c87d5a4069dfd72e8ca28bca3a43cee81b1b9d58ece7b5099a3"} Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.604732 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.902752 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.902798 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:18 crc kubenswrapper[5032]: I0122 16:11:18.947245 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.618230 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7st44" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.743239 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.772264 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-v4pzm"] Jan 22 16:11:19 crc kubenswrapper[5032]: E0122 16:11:19.772682 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4570b56-a88c-4640-9226-f287c8e6e5a7" containerName="controller-manager" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.772786 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4570b56-a88c-4640-9226-f287c8e6e5a7" containerName="controller-manager" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.772951 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4570b56-a88c-4640-9226-f287c8e6e5a7" containerName="controller-manager" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.773390 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.786916 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-v4pzm"] Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.813653 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert\") pod \"f4570b56-a88c-4640-9226-f287c8e6e5a7\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.813759 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlgq9\" (UniqueName: \"kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9\") pod \"f4570b56-a88c-4640-9226-f287c8e6e5a7\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.813815 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config\") pod \"f4570b56-a88c-4640-9226-f287c8e6e5a7\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.813837 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca\") pod \"f4570b56-a88c-4640-9226-f287c8e6e5a7\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.813890 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles\") pod \"f4570b56-a88c-4640-9226-f287c8e6e5a7\" (UID: \"f4570b56-a88c-4640-9226-f287c8e6e5a7\") " Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.814049 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-proxy-ca-bundles\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.814086 
5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-client-ca\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.814987 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config" (OuterVolumeSpecName: "config") pod "f4570b56-a88c-4640-9226-f287c8e6e5a7" (UID: "f4570b56-a88c-4640-9226-f287c8e6e5a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.815074 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b2a88de-9744-43b7-9666-d54566d87d50-serving-cert\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.815166 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-config\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.815270 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fd6x\" (UniqueName: \"kubernetes.io/projected/6b2a88de-9744-43b7-9666-d54566d87d50-kube-api-access-5fd6x\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.815322 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.815332 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f4570b56-a88c-4640-9226-f287c8e6e5a7" (UID: "f4570b56-a88c-4640-9226-f287c8e6e5a7"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.816141 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca" (OuterVolumeSpecName: "client-ca") pod "f4570b56-a88c-4640-9226-f287c8e6e5a7" (UID: "f4570b56-a88c-4640-9226-f287c8e6e5a7"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.828122 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f4570b56-a88c-4640-9226-f287c8e6e5a7" (UID: "f4570b56-a88c-4640-9226-f287c8e6e5a7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.828173 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9" (OuterVolumeSpecName: "kube-api-access-hlgq9") pod "f4570b56-a88c-4640-9226-f287c8e6e5a7" (UID: "f4570b56-a88c-4640-9226-f287c8e6e5a7"). InnerVolumeSpecName "kube-api-access-hlgq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916406 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fd6x\" (UniqueName: \"kubernetes.io/projected/6b2a88de-9744-43b7-9666-d54566d87d50-kube-api-access-5fd6x\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916469 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-proxy-ca-bundles\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916504 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-client-ca\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916533 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b2a88de-9744-43b7-9666-d54566d87d50-serving-cert\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916558 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-config\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916606 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlgq9\" (UniqueName: \"kubernetes.io/projected/f4570b56-a88c-4640-9226-f287c8e6e5a7-kube-api-access-hlgq9\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916621 5032 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-client-ca\") on 
node \"crc\" DevicePath \"\"" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916634 5032 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4570b56-a88c-4640-9226-f287c8e6e5a7-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.916645 5032 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4570b56-a88c-4640-9226-f287c8e6e5a7-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.917652 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-client-ca\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.918034 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-config\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.918661 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b2a88de-9744-43b7-9666-d54566d87d50-proxy-ca-bundles\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.919995 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b2a88de-9744-43b7-9666-d54566d87d50-serving-cert\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:19 crc kubenswrapper[5032]: I0122 16:11:19.932917 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fd6x\" (UniqueName: \"kubernetes.io/projected/6b2a88de-9744-43b7-9666-d54566d87d50-kube-api-access-5fd6x\") pod \"controller-manager-58679666bb-v4pzm\" (UID: \"6b2a88de-9744-43b7-9666-d54566d87d50\") " pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.088494 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.343854 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58679666bb-v4pzm"] Jan 22 16:11:20 crc kubenswrapper[5032]: W0122 16:11:20.346454 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b2a88de_9744_43b7_9666_d54566d87d50.slice/crio-7f7f528303ca8e7672ab737b9bf9abe67a41c7afcc44e353eac0319edee5c8d0 WatchSource:0}: Error finding container 7f7f528303ca8e7672ab737b9bf9abe67a41c7afcc44e353eac0319edee5c8d0: Status 404 returned error can't find the container with id 7f7f528303ca8e7672ab737b9bf9abe67a41c7afcc44e353eac0319edee5c8d0 Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.565668 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" event={"ID":"6b2a88de-9744-43b7-9666-d54566d87d50","Type":"ContainerStarted","Data":"7f7f528303ca8e7672ab737b9bf9abe67a41c7afcc44e353eac0319edee5c8d0"} Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.567115 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" event={"ID":"f4570b56-a88c-4640-9226-f287c8e6e5a7","Type":"ContainerDied","Data":"f417ba887fc672032b2c696c9f9e6ddf53e4c01e27725cce35e819a9738512f4"} Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.567119 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-857459b668-qch7z" Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.567163 5032 scope.go:117] "RemoveContainer" containerID="b8db30da7c119c87d5a4069dfd72e8ca28bca3a43cee81b1b9d58ece7b5099a3" Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.588625 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:11:20 crc kubenswrapper[5032]: I0122 16:11:20.591537 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-857459b668-qch7z"] Jan 22 16:11:21 crc kubenswrapper[5032]: I0122 16:11:21.574838 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" event={"ID":"6b2a88de-9744-43b7-9666-d54566d87d50","Type":"ContainerStarted","Data":"5b4615538c589727c6fc57b4f873064713e3c611aadaf9c94953a4cb0daeca07"} Jan 22 16:11:21 crc kubenswrapper[5032]: I0122 16:11:21.575235 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:21 crc kubenswrapper[5032]: I0122 16:11:21.580036 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" Jan 22 16:11:21 crc kubenswrapper[5032]: I0122 16:11:21.595282 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58679666bb-v4pzm" podStartSLOduration=4.595267638 podStartE2EDuration="4.595267638s" podCreationTimestamp="2026-01-22 16:11:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:11:21.590315701 +0000 UTC m=+385.402484732" 
watchObservedRunningTime="2026-01-22 16:11:21.595267638 +0000 UTC m=+385.407436669" Jan 22 16:11:22 crc kubenswrapper[5032]: I0122 16:11:22.462454 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4570b56-a88c-4640-9226-f287c8e6e5a7" path="/var/lib/kubelet/pods/f4570b56-a88c-4640-9226-f287c8e6e5a7/volumes" Jan 22 16:11:33 crc kubenswrapper[5032]: I0122 16:11:33.016961 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:11:33 crc kubenswrapper[5032]: I0122 16:11:33.017550 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:11:34 crc kubenswrapper[5032]: I0122 16:11:34.059755 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" podUID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" containerName="registry" containerID="cri-o://3ac5e38c1d8b49cace9305ae211cfded3bc36c43805cb92478fcf5d5f8d50b6d" gracePeriod=30 Jan 22 16:11:36 crc kubenswrapper[5032]: I0122 16:11:36.684277 5032 generic.go:334] "Generic (PLEG): container finished" podID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" containerID="3ac5e38c1d8b49cace9305ae211cfded3bc36c43805cb92478fcf5d5f8d50b6d" exitCode=0 Jan 22 16:11:36 crc kubenswrapper[5032]: I0122 16:11:36.684413 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" event={"ID":"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4","Type":"ContainerDied","Data":"3ac5e38c1d8b49cace9305ae211cfded3bc36c43805cb92478fcf5d5f8d50b6d"} Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.265665 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416518 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdkst\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416623 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416794 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416893 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416920 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416942 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.416988 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.417008 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls\") pod \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\" (UID: \"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4\") " Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.417746 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.418595 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.425565 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.425836 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.426299 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst" (OuterVolumeSpecName: "kube-api-access-cdkst") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "kube-api-access-cdkst". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.431786 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.432491 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.440851 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" (UID: "e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518718 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdkst\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-kube-api-access-cdkst\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518766 5032 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518783 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518796 5032 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518808 5032 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518819 5032 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.518830 5032 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.693183 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" event={"ID":"e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4","Type":"ContainerDied","Data":"5601b7894cfa24827bb2ccf67e807eef58bb6974d59624892564b35f52a6b484"} Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.693293 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ndwb9" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.693349 5032 scope.go:117] "RemoveContainer" containerID="3ac5e38c1d8b49cace9305ae211cfded3bc36c43805cb92478fcf5d5f8d50b6d" Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.745970 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:11:37 crc kubenswrapper[5032]: I0122 16:11:37.755505 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ndwb9"] Jan 22 16:11:38 crc kubenswrapper[5032]: I0122 16:11:38.468610 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" path="/var/lib/kubelet/pods/e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4/volumes" Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.017207 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.017785 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.017852 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.018676 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.019028 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89" gracePeriod=600 Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.875694 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89" exitCode=0 Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.875843 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89"} Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.876048 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4"} Jan 22 16:12:03 crc kubenswrapper[5032]: I0122 16:12:03.876074 5032 scope.go:117] "RemoveContainer" containerID="cc84d2eb65a8836ab5a465e944e2675486a38f991b9300b09363b64be6217653" Jan 22 16:14:03 crc kubenswrapper[5032]: I0122 16:14:03.016974 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:14:03 crc kubenswrapper[5032]: I0122 16:14:03.017406 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:14:33 crc kubenswrapper[5032]: I0122 16:14:33.016608 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:14:33 crc kubenswrapper[5032]: I0122 16:14:33.017450 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.184916 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v"] Jan 22 16:15:00 crc kubenswrapper[5032]: E0122 16:15:00.185820 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" containerName="registry" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.185837 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" containerName="registry" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.186060 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2ab81eb-2cbd-4134-8d13-e591bbd8bfb4" containerName="registry" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.186560 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.191654 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.191825 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.197514 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v"] Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.295912 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.295995 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbkx6\" (UniqueName: \"kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.296173 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.397991 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.398308 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbkx6\" (UniqueName: \"kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.398373 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.400475 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume\") pod 
\"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.413262 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.422884 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbkx6\" (UniqueName: \"kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6\") pod \"collect-profiles-29484975-94q7v\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.517625 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:00 crc kubenswrapper[5032]: I0122 16:15:00.790769 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v"] Jan 22 16:15:01 crc kubenswrapper[5032]: I0122 16:15:01.158353 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" containerID="88ef6b06f7af604c85265e919a71b300549c9a92c6522d45f42e9d4c747c8b77" exitCode=0 Jan 22 16:15:01 crc kubenswrapper[5032]: I0122 16:15:01.158478 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" event={"ID":"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f","Type":"ContainerDied","Data":"88ef6b06f7af604c85265e919a71b300549c9a92c6522d45f42e9d4c747c8b77"} Jan 22 16:15:01 crc kubenswrapper[5032]: I0122 16:15:01.158815 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" event={"ID":"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f","Type":"ContainerStarted","Data":"007f373d56ed5c4f0abb1967fec095896b8bb6150fd12e1f25b1d920e5c2c9c6"} Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.413329 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.531722 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbkx6\" (UniqueName: \"kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6\") pod \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.532479 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume\") pod \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.532590 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume\") pod \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\" (UID: \"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f\") " Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.534017 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume" (OuterVolumeSpecName: "config-volume") pod "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" (UID: "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.543321 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" (UID: "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.543740 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6" (OuterVolumeSpecName: "kube-api-access-nbkx6") pod "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" (UID: "8ab0b48d-dd7e-42f2-94a1-1be5a35b967f"). InnerVolumeSpecName "kube-api-access-nbkx6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.634889 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.634932 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbkx6\" (UniqueName: \"kubernetes.io/projected/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-kube-api-access-nbkx6\") on node \"crc\" DevicePath \"\"" Jan 22 16:15:02 crc kubenswrapper[5032]: I0122 16:15:02.634946 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.017113 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.017212 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.017278 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.018184 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.018299 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4" gracePeriod=600 Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.179383 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" event={"ID":"8ab0b48d-dd7e-42f2-94a1-1be5a35b967f","Type":"ContainerDied","Data":"007f373d56ed5c4f0abb1967fec095896b8bb6150fd12e1f25b1d920e5c2c9c6"} Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.179632 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="007f373d56ed5c4f0abb1967fec095896b8bb6150fd12e1f25b1d920e5c2c9c6" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.179411 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v" Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.183230 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4" exitCode=0 Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.183293 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4"} Jan 22 16:15:03 crc kubenswrapper[5032]: I0122 16:15:03.183353 5032 scope.go:117] "RemoveContainer" containerID="01e3cd0aa7557a5667d74b209043ea407f503a2a1b2f1770d7995dbb3963da89" Jan 22 16:15:04 crc kubenswrapper[5032]: I0122 16:15:04.190249 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd"} Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.986954 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-96lql"] Jan 22 16:16:00 crc kubenswrapper[5032]: E0122 16:16:00.987840 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" containerName="collect-profiles" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.987890 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" containerName="collect-profiles" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.988040 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" containerName="collect-profiles" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.988642 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-96lql" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.990022 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p"] Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.994627 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.997312 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.997616 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 22 16:16:00 crc kubenswrapper[5032]: I0122 16:16:00.997972 5032 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-zfz8z" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.000013 5032 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-dsw4k" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.004149 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.012306 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-96lql"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.022178 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-skzfx"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.023168 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.027152 5032 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-pbckl" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.038985 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-skzfx"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.060420 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmnj8\" (UniqueName: \"kubernetes.io/projected/6a0ca471-23dc-4eac-9c3a-0adfc1c0a003-kube-api-access-fmnj8\") pod \"cert-manager-webhook-687f57d79b-skzfx\" (UID: \"6a0ca471-23dc-4eac-9c3a-0adfc1c0a003\") " pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.060492 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9wlc\" (UniqueName: \"kubernetes.io/projected/147a9188-17a7-4cf4-91d1-ca1f033f2da6-kube-api-access-r9wlc\") pod \"cert-manager-858654f9db-96lql\" (UID: \"147a9188-17a7-4cf4-91d1-ca1f033f2da6\") " pod="cert-manager/cert-manager-858654f9db-96lql" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.060760 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8ftw\" (UniqueName: \"kubernetes.io/projected/631e824a-b3b5-4876-aefd-cda7db40252a-kube-api-access-m8ftw\") pod \"cert-manager-cainjector-cf98fcc89-ccw7p\" (UID: \"631e824a-b3b5-4876-aefd-cda7db40252a\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.162303 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8ftw\" (UniqueName: \"kubernetes.io/projected/631e824a-b3b5-4876-aefd-cda7db40252a-kube-api-access-m8ftw\") pod \"cert-manager-cainjector-cf98fcc89-ccw7p\" (UID: \"631e824a-b3b5-4876-aefd-cda7db40252a\") " 
pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.164196 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmnj8\" (UniqueName: \"kubernetes.io/projected/6a0ca471-23dc-4eac-9c3a-0adfc1c0a003-kube-api-access-fmnj8\") pod \"cert-manager-webhook-687f57d79b-skzfx\" (UID: \"6a0ca471-23dc-4eac-9c3a-0adfc1c0a003\") " pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.164341 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9wlc\" (UniqueName: \"kubernetes.io/projected/147a9188-17a7-4cf4-91d1-ca1f033f2da6-kube-api-access-r9wlc\") pod \"cert-manager-858654f9db-96lql\" (UID: \"147a9188-17a7-4cf4-91d1-ca1f033f2da6\") " pod="cert-manager/cert-manager-858654f9db-96lql" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.183415 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmnj8\" (UniqueName: \"kubernetes.io/projected/6a0ca471-23dc-4eac-9c3a-0adfc1c0a003-kube-api-access-fmnj8\") pod \"cert-manager-webhook-687f57d79b-skzfx\" (UID: \"6a0ca471-23dc-4eac-9c3a-0adfc1c0a003\") " pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.186241 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9wlc\" (UniqueName: \"kubernetes.io/projected/147a9188-17a7-4cf4-91d1-ca1f033f2da6-kube-api-access-r9wlc\") pod \"cert-manager-858654f9db-96lql\" (UID: \"147a9188-17a7-4cf4-91d1-ca1f033f2da6\") " pod="cert-manager/cert-manager-858654f9db-96lql" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.192031 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8ftw\" (UniqueName: \"kubernetes.io/projected/631e824a-b3b5-4876-aefd-cda7db40252a-kube-api-access-m8ftw\") pod \"cert-manager-cainjector-cf98fcc89-ccw7p\" (UID: \"631e824a-b3b5-4876-aefd-cda7db40252a\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.314639 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-96lql" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.321584 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.345131 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.569394 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-96lql"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.577816 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.614592 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-96lql" event={"ID":"147a9188-17a7-4cf4-91d1-ca1f033f2da6","Type":"ContainerStarted","Data":"7a61c92867fb5106c9d77825b7aa51c5da0c0ed6677822c81c88aa8d496b7d12"} Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.857728 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p"] Jan 22 16:16:01 crc kubenswrapper[5032]: I0122 16:16:01.862055 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-skzfx"] Jan 22 16:16:01 crc kubenswrapper[5032]: W0122 16:16:01.865370 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod631e824a_b3b5_4876_aefd_cda7db40252a.slice/crio-eeb7f0c259b64ee47478fd06ec69567c08adb546a617ed2de125f87d350e1672 WatchSource:0}: Error finding container eeb7f0c259b64ee47478fd06ec69567c08adb546a617ed2de125f87d350e1672: Status 404 returned error can't find the container with id eeb7f0c259b64ee47478fd06ec69567c08adb546a617ed2de125f87d350e1672 Jan 22 16:16:01 crc kubenswrapper[5032]: W0122 16:16:01.866723 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a0ca471_23dc_4eac_9c3a_0adfc1c0a003.slice/crio-80f076480a25e24cc047cd1439a13042652a006e678afd257374d0d252772f6d WatchSource:0}: Error finding container 80f076480a25e24cc047cd1439a13042652a006e678afd257374d0d252772f6d: Status 404 returned error can't find the container with id 80f076480a25e24cc047cd1439a13042652a006e678afd257374d0d252772f6d Jan 22 16:16:02 crc kubenswrapper[5032]: I0122 16:16:02.624326 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" event={"ID":"6a0ca471-23dc-4eac-9c3a-0adfc1c0a003","Type":"ContainerStarted","Data":"80f076480a25e24cc047cd1439a13042652a006e678afd257374d0d252772f6d"} Jan 22 16:16:02 crc kubenswrapper[5032]: I0122 16:16:02.625238 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" event={"ID":"631e824a-b3b5-4876-aefd-cda7db40252a","Type":"ContainerStarted","Data":"eeb7f0c259b64ee47478fd06ec69567c08adb546a617ed2de125f87d350e1672"} Jan 22 16:16:04 crc kubenswrapper[5032]: I0122 16:16:04.644558 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-96lql" event={"ID":"147a9188-17a7-4cf4-91d1-ca1f033f2da6","Type":"ContainerStarted","Data":"8944f07581ace0eefbcdd4cff39c1d104df5943e60c5d3b5e74a3179ef0e3dea"} Jan 22 16:16:04 crc kubenswrapper[5032]: I0122 16:16:04.679738 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-96lql" podStartSLOduration=2.224057537 podStartE2EDuration="4.679705464s" podCreationTimestamp="2026-01-22 16:16:00 +0000 UTC" firstStartedPulling="2026-01-22 16:16:01.57762875 +0000 UTC m=+665.389797781" 
lastFinishedPulling="2026-01-22 16:16:04.033276667 +0000 UTC m=+667.845445708" observedRunningTime="2026-01-22 16:16:04.666067608 +0000 UTC m=+668.478236649" watchObservedRunningTime="2026-01-22 16:16:04.679705464 +0000 UTC m=+668.491874495" Jan 22 16:16:05 crc kubenswrapper[5032]: I0122 16:16:05.653452 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" event={"ID":"631e824a-b3b5-4876-aefd-cda7db40252a","Type":"ContainerStarted","Data":"b1f5715bd446e1b1cd7913abf2b441199d240ab13a2ef63ac85b0e1a3ae339dd"} Jan 22 16:16:05 crc kubenswrapper[5032]: I0122 16:16:05.659334 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" event={"ID":"6a0ca471-23dc-4eac-9c3a-0adfc1c0a003","Type":"ContainerStarted","Data":"d9c5cb662e38195e0c41d7b470bfc837d2af7d68015e49657f29695c0e96700c"} Jan 22 16:16:05 crc kubenswrapper[5032]: I0122 16:16:05.659377 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:05 crc kubenswrapper[5032]: I0122 16:16:05.680406 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-ccw7p" podStartSLOduration=2.335307338 podStartE2EDuration="5.680380044s" podCreationTimestamp="2026-01-22 16:16:00 +0000 UTC" firstStartedPulling="2026-01-22 16:16:01.867764087 +0000 UTC m=+665.679933118" lastFinishedPulling="2026-01-22 16:16:05.212836793 +0000 UTC m=+669.025005824" observedRunningTime="2026-01-22 16:16:05.67848966 +0000 UTC m=+669.490658731" watchObservedRunningTime="2026-01-22 16:16:05.680380044 +0000 UTC m=+669.492549085" Jan 22 16:16:05 crc kubenswrapper[5032]: I0122 16:16:05.719734 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" podStartSLOduration=2.431926094 podStartE2EDuration="5.719699567s" podCreationTimestamp="2026-01-22 16:16:00 +0000 UTC" firstStartedPulling="2026-01-22 16:16:01.869770824 +0000 UTC m=+665.681939865" lastFinishedPulling="2026-01-22 16:16:05.157544307 +0000 UTC m=+668.969713338" observedRunningTime="2026-01-22 16:16:05.718593126 +0000 UTC m=+669.530762167" watchObservedRunningTime="2026-01-22 16:16:05.719699567 +0000 UTC m=+669.531868608" Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.895412 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5q2w8"] Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896602 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-controller" containerID="cri-o://e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896765 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-acl-logging" containerID="cri-o://f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896698 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="northd" 
containerID="cri-o://2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896737 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-node" containerID="cri-o://0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896768 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="sbdb" containerID="cri-o://9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.896781 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="nbdb" containerID="cri-o://48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.897004 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" gracePeriod=30 Jan 22 16:16:10 crc kubenswrapper[5032]: I0122 16:16:10.928485 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" containerID="cri-o://98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" gracePeriod=30 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.349409 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.684376 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/3.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.687969 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovn-acl-logging/0.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.688845 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovn-controller/0.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.689827 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.707344 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/2.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.708334 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/1.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.709370 5032 generic.go:334] "Generic (PLEG): container finished" podID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" containerID="b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86" exitCode=2 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.709512 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerDied","Data":"b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.709631 5032 scope.go:117] "RemoveContainer" containerID="38423f4de306a93cc1a78af3d1ad0e00982006a6b9991916d9ec20e428f3b827" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.710538 5032 scope.go:117] "RemoveContainer" containerID="b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.710947 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-zfz9c_openshift-multus(d111dfc5-e30e-477d-81c1-afcc5bd064ad)\"" pod="openshift-multus/multus-zfz9c" podUID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.713323 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovnkube-controller/3.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715439 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715469 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715491 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715516 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715535 5032 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715578 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psnqf\" (UniqueName: \"kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715596 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715616 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715632 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715645 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715683 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715699 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715725 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715745 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc 
kubenswrapper[5032]: I0122 16:16:11.715760 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715781 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715804 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715818 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715810 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash" (OuterVolumeSpecName: "host-slash") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715890 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715833 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.715987 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes\") pod \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\" (UID: \"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c\") " Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716360 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716436 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716488 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716594 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716649 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket" (OuterVolumeSpecName: "log-socket") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716727 5032 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-slash\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716746 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716761 5032 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716776 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716790 5032 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716816 5032 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716841 5032 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716897 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log" (OuterVolumeSpecName: "node-log") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716941 5032 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716963 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.716997 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.717416 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.717518 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.717603 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.717634 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.718577 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.720625 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovn-acl-logging/0.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.721725 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5q2w8_ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/ovn-controller/0.log" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722224 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722265 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722285 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722301 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722314 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722328 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" exitCode=0 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722340 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" 
containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" exitCode=143 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722356 5032 generic.go:334] "Generic (PLEG): container finished" podID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" exitCode=143 Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722367 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722415 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722435 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722447 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722461 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722471 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722484 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722495 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722504 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722512 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722518 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 
16:16:11.722524 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722533 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722539 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722547 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722553 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722560 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722569 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722575 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722581 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722586 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722592 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722597 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722602 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722432 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.722607 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723418 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723441 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723509 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723545 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723560 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723573 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723585 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723598 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723609 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723619 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723629 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723639 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723649 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723665 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5q2w8" event={"ID":"ba09835f-0f75-4266-9ae2-39dc3cb0fd4c","Type":"ContainerDied","Data":"fabc7337765c3f612d599754eb42ebdca39b3625135a9fe3c272f31428f3246e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723692 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723704 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723715 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723725 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723736 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723746 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723756 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723766 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723776 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723787 5032 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.723826 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.724779 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf" (OuterVolumeSpecName: "kube-api-access-psnqf") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "kube-api-access-psnqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.734098 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" (UID: "ba09835f-0f75-4266-9ae2-39dc3cb0fd4c"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798028 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-r42rs"] Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798616 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="nbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798643 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="nbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798659 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="sbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798670 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="sbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798690 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798701 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798719 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="northd" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798729 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="northd" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798745 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798755 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798778 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798787 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798802 5032 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798812 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798825 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798835 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798852 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-node" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798896 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-node" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798909 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798920 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798932 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-acl-logging" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798942 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-acl-logging" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.798959 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kubecfg-setup" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.798970 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kubecfg-setup" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799125 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="sbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799140 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799156 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799170 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799183 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="nbdb" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799197 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovn-acl-logging" Jan 22 
16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799210 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799224 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="northd" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799238 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="kube-rbac-proxy-node" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799255 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.799407 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799421 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799577 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.799595 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" containerName="ovnkube-controller" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.802004 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.802463 5032 scope.go:117] "RemoveContainer" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.817801 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psnqf\" (UniqueName: \"kubernetes.io/projected/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-kube-api-access-psnqf\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.817843 5032 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.817902 5032 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.817919 5032 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-log-socket\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818043 5032 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818063 5032 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818079 5032 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-node-log\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818095 5032 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818110 5032 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818126 5032 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818139 5032 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818157 5032 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818171 5032 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.818186 5032 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.830890 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.857449 5032 scope.go:117] "RemoveContainer" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.872625 5032 scope.go:117] "RemoveContainer" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.892165 5032 scope.go:117] "RemoveContainer" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.911380 5032 scope.go:117] "RemoveContainer" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919547 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-systemd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919637 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-systemd-units\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919686 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919724 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-slash\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919818 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-etc-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919886 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919934 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-var-lib-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919951 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-config\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.919973 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-node-log\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920038 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-kubelet\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920080 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-env-overrides\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920113 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovn-node-metrics-cert\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920130 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-bin\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920187 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920239 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-ovn\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920286 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-log-socket\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920352 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-netns\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920426 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-netd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920468 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-script-lib\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.920506 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5njh6\" (UniqueName: \"kubernetes.io/projected/a1bd651e-300c-4b75-af4f-76d9b64bafff-kube-api-access-5njh6\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.925492 5032 scope.go:117] "RemoveContainer" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.938323 5032 scope.go:117] "RemoveContainer" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.952701 5032 scope.go:117] "RemoveContainer" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.966958 5032 scope.go:117] "RemoveContainer" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.982354 5032 scope.go:117] "RemoveContainer" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.983036 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": container with ID starting with 98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e not found: ID does not exist" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.983084 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} err="failed to get container status \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": rpc error: code = NotFound desc = could not find container \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": container with ID starting with 98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.983123 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.983555 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": container with ID starting with 4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2 not found: ID does not exist" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.983589 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} err="failed to get container status \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": rpc error: code = NotFound desc = could not find container \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": container with ID starting with 4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.983617 5032 scope.go:117] "RemoveContainer" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.984040 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": container with ID starting with 9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc not found: ID does not exist" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.984076 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} err="failed to get container status \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": rpc error: code = NotFound desc = could not find container \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": container with ID starting with 9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.984100 5032 scope.go:117] "RemoveContainer" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.984539 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": container with ID starting with 48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134 not found: ID does not exist" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.984570 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} err="failed to get container status \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": rpc error: code = NotFound desc = could not find container \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": container with ID starting with 48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.984596 5032 scope.go:117] "RemoveContainer" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.985023 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": container with ID starting with 2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2 not found: ID does not exist" 
containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.985059 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} err="failed to get container status \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": rpc error: code = NotFound desc = could not find container \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": container with ID starting with 2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.985086 5032 scope.go:117] "RemoveContainer" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.985401 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": container with ID starting with bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718 not found: ID does not exist" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.985437 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} err="failed to get container status \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": rpc error: code = NotFound desc = could not find container \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": container with ID starting with bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.985460 5032 scope.go:117] "RemoveContainer" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.985906 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": container with ID starting with 0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8 not found: ID does not exist" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.985976 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} err="failed to get container status \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": rpc error: code = NotFound desc = could not find container \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": container with ID starting with 0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.986063 5032 scope.go:117] "RemoveContainer" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.986436 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": container with ID starting with f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80 not found: ID does not exist" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.986471 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} err="failed to get container status \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": rpc error: code = NotFound desc = could not find container \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": container with ID starting with f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.986496 5032 scope.go:117] "RemoveContainer" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.986817 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": container with ID starting with e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1 not found: ID does not exist" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.986852 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} err="failed to get container status \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": rpc error: code = NotFound desc = could not find container \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": container with ID starting with e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.986906 5032 scope.go:117] "RemoveContainer" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:11 crc kubenswrapper[5032]: E0122 16:16:11.987212 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": container with ID starting with e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446 not found: ID does not exist" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.987248 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} err="failed to get container status \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": rpc error: code = NotFound desc = could not find container \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": container with ID starting with e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.987270 5032 scope.go:117] "RemoveContainer" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc 
kubenswrapper[5032]: I0122 16:16:11.987579 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} err="failed to get container status \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": rpc error: code = NotFound desc = could not find container \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": container with ID starting with 98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.987614 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.987987 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} err="failed to get container status \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": rpc error: code = NotFound desc = could not find container \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": container with ID starting with 4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.988032 5032 scope.go:117] "RemoveContainer" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.988372 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} err="failed to get container status \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": rpc error: code = NotFound desc = could not find container \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": container with ID starting with 9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.988403 5032 scope.go:117] "RemoveContainer" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.988685 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} err="failed to get container status \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": rpc error: code = NotFound desc = could not find container \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": container with ID starting with 48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.988717 5032 scope.go:117] "RemoveContainer" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.989042 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} err="failed to get container status \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": rpc error: code = NotFound desc = could not find container \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": container with ID 
starting with 2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.989077 5032 scope.go:117] "RemoveContainer" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.989445 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} err="failed to get container status \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": rpc error: code = NotFound desc = could not find container \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": container with ID starting with bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.989505 5032 scope.go:117] "RemoveContainer" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.990069 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} err="failed to get container status \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": rpc error: code = NotFound desc = could not find container \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": container with ID starting with 0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.990116 5032 scope.go:117] "RemoveContainer" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.990494 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} err="failed to get container status \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": rpc error: code = NotFound desc = could not find container \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": container with ID starting with f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.990549 5032 scope.go:117] "RemoveContainer" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.990988 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} err="failed to get container status \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": rpc error: code = NotFound desc = could not find container \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": container with ID starting with e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.991047 5032 scope.go:117] "RemoveContainer" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.991402 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} err="failed to get container status \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": rpc error: code = NotFound desc = could not find container \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": container with ID starting with e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.991453 5032 scope.go:117] "RemoveContainer" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.991818 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} err="failed to get container status \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": rpc error: code = NotFound desc = could not find container \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": container with ID starting with 98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.991929 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.992311 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} err="failed to get container status \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": rpc error: code = NotFound desc = could not find container \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": container with ID starting with 4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.992354 5032 scope.go:117] "RemoveContainer" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.992715 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} err="failed to get container status \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": rpc error: code = NotFound desc = could not find container \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": container with ID starting with 9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.992769 5032 scope.go:117] "RemoveContainer" containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.993177 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} err="failed to get container status \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": rpc error: code = NotFound desc = could not find container \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": container with ID starting with 48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134 not found: ID does not exist" Jan 
22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.993213 5032 scope.go:117] "RemoveContainer" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.993590 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} err="failed to get container status \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": rpc error: code = NotFound desc = could not find container \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": container with ID starting with 2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.993664 5032 scope.go:117] "RemoveContainer" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.994194 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} err="failed to get container status \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": rpc error: code = NotFound desc = could not find container \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": container with ID starting with bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.994248 5032 scope.go:117] "RemoveContainer" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.994608 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} err="failed to get container status \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": rpc error: code = NotFound desc = could not find container \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": container with ID starting with 0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.994662 5032 scope.go:117] "RemoveContainer" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.995051 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} err="failed to get container status \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": rpc error: code = NotFound desc = could not find container \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": container with ID starting with f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.995085 5032 scope.go:117] "RemoveContainer" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.995466 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} err="failed to get container status 
\"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": rpc error: code = NotFound desc = could not find container \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": container with ID starting with e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.995519 5032 scope.go:117] "RemoveContainer" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.996117 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} err="failed to get container status \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": rpc error: code = NotFound desc = could not find container \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": container with ID starting with e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.996171 5032 scope.go:117] "RemoveContainer" containerID="98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.996534 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e"} err="failed to get container status \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": rpc error: code = NotFound desc = could not find container \"98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e\": container with ID starting with 98e64dc9d1e0ae6a610872b4e62c371a104babcf8ead0bfda9d0506e9403676e not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.996578 5032 scope.go:117] "RemoveContainer" containerID="4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.996959 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2"} err="failed to get container status \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": rpc error: code = NotFound desc = could not find container \"4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2\": container with ID starting with 4a8769516d84b19b3364c2cb493beb187872a323df0ec64ce12fa1e4aadd0da2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.997018 5032 scope.go:117] "RemoveContainer" containerID="9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.997385 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc"} err="failed to get container status \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": rpc error: code = NotFound desc = could not find container \"9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc\": container with ID starting with 9087adbc52317b13d4a646aa7a2fcd82757f1e22bfa553185ccdfcb4ab8b2ddc not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.997441 5032 scope.go:117] "RemoveContainer" 
containerID="48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.997781 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134"} err="failed to get container status \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": rpc error: code = NotFound desc = could not find container \"48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134\": container with ID starting with 48e1287572ad9b5b567eb647c9134d5a007ff7366ff471273ef2e5a036e13134 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.997826 5032 scope.go:117] "RemoveContainer" containerID="2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.998249 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2"} err="failed to get container status \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": rpc error: code = NotFound desc = could not find container \"2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2\": container with ID starting with 2a519cf9424144dc5046e3888afdbbf62c86fcbc29a2ef2c45dd8d91c0bac0a2 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.998306 5032 scope.go:117] "RemoveContainer" containerID="bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.998665 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718"} err="failed to get container status \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": rpc error: code = NotFound desc = could not find container \"bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718\": container with ID starting with bb0372ac4c67a2e3108724b8dededd1b142b84432ba2919535d47c4983345718 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.998717 5032 scope.go:117] "RemoveContainer" containerID="0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.999121 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8"} err="failed to get container status \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": rpc error: code = NotFound desc = could not find container \"0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8\": container with ID starting with 0f3ce42d12ad1948a7834c3f5430e9aab13439115e8e703eeac5310ce0be44b8 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.999175 5032 scope.go:117] "RemoveContainer" containerID="f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.999524 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80"} err="failed to get container status \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": rpc error: code = NotFound desc = could not find 
container \"f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80\": container with ID starting with f1bb350f809cabc83f18d72d60fc350795a1a7f78d8cca85bd30e37b57551e80 not found: ID does not exist" Jan 22 16:16:11 crc kubenswrapper[5032]: I0122 16:16:11.999565 5032 scope.go:117] "RemoveContainer" containerID="e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:11.999970 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1"} err="failed to get container status \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": rpc error: code = NotFound desc = could not find container \"e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1\": container with ID starting with e00160a2bba1e2e26e3149aa1a47ccdf6de14d078bb2323dbb513112a0cff4b1 not found: ID does not exist" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.000022 5032 scope.go:117] "RemoveContainer" containerID="e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.000380 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446"} err="failed to get container status \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": rpc error: code = NotFound desc = could not find container \"e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446\": container with ID starting with e42f0c9ca5624e92492dab8f9662dc9b1ce79657c9aba7c3c969a54634459446 not found: ID does not exist" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022409 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-netd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022526 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-script-lib\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022568 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5njh6\" (UniqueName: \"kubernetes.io/projected/a1bd651e-300c-4b75-af4f-76d9b64bafff-kube-api-access-5njh6\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022623 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-systemd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022668 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-systemd-units\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022651 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-netd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022718 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022773 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-slash\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022832 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-etc-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022923 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.022977 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-var-lib-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023022 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-config\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023077 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-node-log\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023142 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023218 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-kubelet\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023153 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-kubelet\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023187 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-slash\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023263 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-node-log\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023142 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-etc-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023295 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-ovn-kubernetes\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023310 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-systemd\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.023337 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-systemd-units\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024424 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-config\") pod \"ovnkube-node-r42rs\" (UID: 
\"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024429 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovnkube-script-lib\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024540 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-env-overrides\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024583 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovn-node-metrics-cert\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024616 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-bin\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024649 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-ovn\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024682 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-log-socket\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024713 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024780 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-netns\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024882 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-run-netns\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc 
kubenswrapper[5032]: I0122 16:16:12.024929 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-ovn\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024930 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-host-cni-bin\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.024978 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-log-socket\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.025010 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-run-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.025462 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1bd651e-300c-4b75-af4f-76d9b64bafff-env-overrides\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.025803 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1bd651e-300c-4b75-af4f-76d9b64bafff-var-lib-openvswitch\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.031655 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1bd651e-300c-4b75-af4f-76d9b64bafff-ovn-node-metrics-cert\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.046803 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5njh6\" (UniqueName: \"kubernetes.io/projected/a1bd651e-300c-4b75-af4f-76d9b64bafff-kube-api-access-5njh6\") pod \"ovnkube-node-r42rs\" (UID: \"a1bd651e-300c-4b75-af4f-76d9b64bafff\") " pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.107161 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5q2w8"] Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.111690 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5q2w8"] Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.119348 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.467271 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba09835f-0f75-4266-9ae2-39dc3cb0fd4c" path="/var/lib/kubelet/pods/ba09835f-0f75-4266-9ae2-39dc3cb0fd4c/volumes" Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.741394 5032 generic.go:334] "Generic (PLEG): container finished" podID="a1bd651e-300c-4b75-af4f-76d9b64bafff" containerID="4f4185b6c951009fbe7eb0c9731fa2818df517cf009276576b66a5a52de3da82" exitCode=0 Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.741588 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerDied","Data":"4f4185b6c951009fbe7eb0c9731fa2818df517cf009276576b66a5a52de3da82"} Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.742368 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"8751d34f80a0447b51e188d88c770ebad3ec6123041215dcf01125c4327410e6"} Jan 22 16:16:12 crc kubenswrapper[5032]: I0122 16:16:12.746614 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/2.log" Jan 22 16:16:13 crc kubenswrapper[5032]: I0122 16:16:13.761328 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"351d19a054031fc8aa216aa476a5272c24f1b48365463b8f4a6fd5c48049aae3"} Jan 22 16:16:13 crc kubenswrapper[5032]: I0122 16:16:13.763235 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"a2e01867c5ae09d1240edbfdad9ffdbe0cd20fb0af526726872c8d57f47aba3e"} Jan 22 16:16:13 crc kubenswrapper[5032]: I0122 16:16:13.763534 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"4ea1a64f92f54344551995a1d76816fdaaadc9091d6481cba13a54dbb24a5469"} Jan 22 16:16:14 crc kubenswrapper[5032]: I0122 16:16:14.776192 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"3c30545fa301e5d9570d57fda16436cbb27808feef37fc7b62393ef904696c86"} Jan 22 16:16:14 crc kubenswrapper[5032]: I0122 16:16:14.776541 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"c2372fa4138fadc4f55e1ac8be62ae6eb7684c6ee6c67e97f1dd91691f8a826b"} Jan 22 16:16:14 crc kubenswrapper[5032]: I0122 16:16:14.776564 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"1880c3f71be50fe4372eb60f4721370e4b32f79a26f8467e031abc5d59ca9e34"} Jan 22 16:16:16 crc kubenswrapper[5032]: I0122 16:16:16.807423 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" 
event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"b12d94ede6326b95510b6246f9934ac15f2fcc476c6a7b01fd181c93d387b059"} Jan 22 16:16:18 crc kubenswrapper[5032]: I0122 16:16:18.823689 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" event={"ID":"a1bd651e-300c-4b75-af4f-76d9b64bafff","Type":"ContainerStarted","Data":"fb1b1cb1bbe2969d5c7ece1b7ce4065f067192332b6128696f4238a131ad8815"} Jan 22 16:16:18 crc kubenswrapper[5032]: I0122 16:16:18.824568 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:18 crc kubenswrapper[5032]: I0122 16:16:18.824587 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:18 crc kubenswrapper[5032]: I0122 16:16:18.857510 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:18 crc kubenswrapper[5032]: I0122 16:16:18.861750 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" podStartSLOduration=7.861724044 podStartE2EDuration="7.861724044s" podCreationTimestamp="2026-01-22 16:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:16:18.856484456 +0000 UTC m=+682.668653497" watchObservedRunningTime="2026-01-22 16:16:18.861724044 +0000 UTC m=+682.673893095" Jan 22 16:16:19 crc kubenswrapper[5032]: I0122 16:16:19.829598 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:19 crc kubenswrapper[5032]: I0122 16:16:19.879247 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:25 crc kubenswrapper[5032]: I0122 16:16:25.455174 5032 scope.go:117] "RemoveContainer" containerID="b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86" Jan 22 16:16:25 crc kubenswrapper[5032]: E0122 16:16:25.455919 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-zfz9c_openshift-multus(d111dfc5-e30e-477d-81c1-afcc5bd064ad)\"" pod="openshift-multus/multus-zfz9c" podUID="d111dfc5-e30e-477d-81c1-afcc5bd064ad" Jan 22 16:16:38 crc kubenswrapper[5032]: I0122 16:16:38.456255 5032 scope.go:117] "RemoveContainer" containerID="b3bdb803d72f4f9eba58e5a41df66631589f736b5541903eb85b9763e27abf86" Jan 22 16:16:39 crc kubenswrapper[5032]: I0122 16:16:39.956595 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zfz9c_d111dfc5-e30e-477d-81c1-afcc5bd064ad/kube-multus/2.log" Jan 22 16:16:39 crc kubenswrapper[5032]: I0122 16:16:39.957012 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zfz9c" event={"ID":"d111dfc5-e30e-477d-81c1-afcc5bd064ad","Type":"ContainerStarted","Data":"50df9bba2c0465452f9b44e822c87d914feabec59cc3085f4581fbe8a80a9346"} Jan 22 16:16:42 crc kubenswrapper[5032]: I0122 16:16:42.154189 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-r42rs" Jan 22 16:16:52 crc kubenswrapper[5032]: I0122 16:16:52.996333 5032 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw"] Jan 22 16:16:52 crc kubenswrapper[5032]: I0122 16:16:52.998042 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.001462 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.010267 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw"] Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.053535 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.053731 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.053925 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv5j7\" (UniqueName: \"kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.155047 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.155112 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv5j7\" (UniqueName: \"kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.155188 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " 
pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.156153 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.156240 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.185834 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv5j7\" (UniqueName: \"kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.375300 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:16:53 crc kubenswrapper[5032]: I0122 16:16:53.868255 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw"] Jan 22 16:16:54 crc kubenswrapper[5032]: I0122 16:16:54.081143 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" event={"ID":"bb030491-c854-459b-9688-96a54419708f","Type":"ContainerStarted","Data":"f945bdf442d66139de3ea179eac728301cb5b21594e924d15e62c7ded35006d4"} Jan 22 16:16:55 crc kubenswrapper[5032]: I0122 16:16:55.087213 5032 generic.go:334] "Generic (PLEG): container finished" podID="bb030491-c854-459b-9688-96a54419708f" containerID="cb0aecbc1f68dd33c39d1b8ec91a1036493678a2082c3676a0840a32458e1668" exitCode=0 Jan 22 16:16:55 crc kubenswrapper[5032]: I0122 16:16:55.087262 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" event={"ID":"bb030491-c854-459b-9688-96a54419708f","Type":"ContainerDied","Data":"cb0aecbc1f68dd33c39d1b8ec91a1036493678a2082c3676a0840a32458e1668"} Jan 22 16:17:00 crc kubenswrapper[5032]: I0122 16:17:00.127917 5032 generic.go:334] "Generic (PLEG): container finished" podID="bb030491-c854-459b-9688-96a54419708f" containerID="3b1f9cd948fc1584e5d89cad4a8f774b1e0212128d5fd87b49e0c5b800edacc1" exitCode=0 Jan 22 16:17:00 crc kubenswrapper[5032]: I0122 16:17:00.128055 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" event={"ID":"bb030491-c854-459b-9688-96a54419708f","Type":"ContainerDied","Data":"3b1f9cd948fc1584e5d89cad4a8f774b1e0212128d5fd87b49e0c5b800edacc1"} Jan 22 16:17:02 crc kubenswrapper[5032]: I0122 
16:17:02.153334 5032 generic.go:334] "Generic (PLEG): container finished" podID="bb030491-c854-459b-9688-96a54419708f" containerID="e86636e84a770603c1155f2b9fc44960b0f969578dbe6b39b81bcfcac5527aa4" exitCode=0 Jan 22 16:17:02 crc kubenswrapper[5032]: I0122 16:17:02.153792 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" event={"ID":"bb030491-c854-459b-9688-96a54419708f","Type":"ContainerDied","Data":"e86636e84a770603c1155f2b9fc44960b0f969578dbe6b39b81bcfcac5527aa4"} Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.017700 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.017889 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.436129 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.511126 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv5j7\" (UniqueName: \"kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7\") pod \"bb030491-c854-459b-9688-96a54419708f\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.511330 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle\") pod \"bb030491-c854-459b-9688-96a54419708f\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.511436 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util\") pod \"bb030491-c854-459b-9688-96a54419708f\" (UID: \"bb030491-c854-459b-9688-96a54419708f\") " Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.512537 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle" (OuterVolumeSpecName: "bundle") pod "bb030491-c854-459b-9688-96a54419708f" (UID: "bb030491-c854-459b-9688-96a54419708f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.520159 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7" (OuterVolumeSpecName: "kube-api-access-tv5j7") pod "bb030491-c854-459b-9688-96a54419708f" (UID: "bb030491-c854-459b-9688-96a54419708f"). InnerVolumeSpecName "kube-api-access-tv5j7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.532271 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util" (OuterVolumeSpecName: "util") pod "bb030491-c854-459b-9688-96a54419708f" (UID: "bb030491-c854-459b-9688-96a54419708f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.613201 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv5j7\" (UniqueName: \"kubernetes.io/projected/bb030491-c854-459b-9688-96a54419708f-kube-api-access-tv5j7\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.613266 5032 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:03 crc kubenswrapper[5032]: I0122 16:17:03.613285 5032 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb030491-c854-459b-9688-96a54419708f-util\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:04 crc kubenswrapper[5032]: I0122 16:17:04.173321 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" event={"ID":"bb030491-c854-459b-9688-96a54419708f","Type":"ContainerDied","Data":"f945bdf442d66139de3ea179eac728301cb5b21594e924d15e62c7ded35006d4"} Jan 22 16:17:04 crc kubenswrapper[5032]: I0122 16:17:04.174007 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f945bdf442d66139de3ea179eac728301cb5b21594e924d15e62c7ded35006d4" Jan 22 16:17:04 crc kubenswrapper[5032]: I0122 16:17:04.173468 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.580898 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-x2pwd"] Jan 22 16:17:09 crc kubenswrapper[5032]: E0122 16:17:09.581626 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="util" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.581641 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="util" Jan 22 16:17:09 crc kubenswrapper[5032]: E0122 16:17:09.581656 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="extract" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.581665 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="extract" Jan 22 16:17:09 crc kubenswrapper[5032]: E0122 16:17:09.581677 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="pull" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.581687 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="pull" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.581820 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb030491-c854-459b-9688-96a54419708f" containerName="extract" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.582327 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.586483 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.586697 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.588497 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-xc2wx" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.629268 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-x2pwd"] Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.721661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpd9d\" (UniqueName: \"kubernetes.io/projected/2b63c1dc-4cef-491f-b146-cdad3ca66d18-kube-api-access-xpd9d\") pod \"nmstate-operator-646758c888-x2pwd\" (UID: \"2b63c1dc-4cef-491f-b146-cdad3ca66d18\") " pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.823342 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpd9d\" (UniqueName: \"kubernetes.io/projected/2b63c1dc-4cef-491f-b146-cdad3ca66d18-kube-api-access-xpd9d\") pod \"nmstate-operator-646758c888-x2pwd\" (UID: \"2b63c1dc-4cef-491f-b146-cdad3ca66d18\") " pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.849577 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpd9d\" 
(UniqueName: \"kubernetes.io/projected/2b63c1dc-4cef-491f-b146-cdad3ca66d18-kube-api-access-xpd9d\") pod \"nmstate-operator-646758c888-x2pwd\" (UID: \"2b63c1dc-4cef-491f-b146-cdad3ca66d18\") " pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" Jan 22 16:17:09 crc kubenswrapper[5032]: I0122 16:17:09.930929 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" Jan 22 16:17:10 crc kubenswrapper[5032]: I0122 16:17:10.151692 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-x2pwd"] Jan 22 16:17:10 crc kubenswrapper[5032]: I0122 16:17:10.213305 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" event={"ID":"2b63c1dc-4cef-491f-b146-cdad3ca66d18","Type":"ContainerStarted","Data":"9bd8f0ee2cc1c234d5dfa7af9379bc967b52cef8c7a5c85089ca6ba7172f6fe4"} Jan 22 16:17:13 crc kubenswrapper[5032]: I0122 16:17:13.233096 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" event={"ID":"2b63c1dc-4cef-491f-b146-cdad3ca66d18","Type":"ContainerStarted","Data":"e491af175890fe8e19c02a8576d8aeec9255579005d44986518ba4ea2f1156bf"} Jan 22 16:17:13 crc kubenswrapper[5032]: I0122 16:17:13.252542 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-x2pwd" podStartSLOduration=1.9957252300000001 podStartE2EDuration="4.25252477s" podCreationTimestamp="2026-01-22 16:17:09 +0000 UTC" firstStartedPulling="2026-01-22 16:17:10.162678617 +0000 UTC m=+733.974847658" lastFinishedPulling="2026-01-22 16:17:12.419478157 +0000 UTC m=+736.231647198" observedRunningTime="2026-01-22 16:17:13.248455311 +0000 UTC m=+737.060624352" watchObservedRunningTime="2026-01-22 16:17:13.25252477 +0000 UTC m=+737.064693801" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.165315 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-l4hfh"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.166399 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.168729 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-9wjgl" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.181256 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.182952 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.185661 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.205505 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-znmml"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.206258 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.239466 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-l4hfh"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.266831 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284342 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjjnh\" (UniqueName: \"kubernetes.io/projected/0b20496f-a661-4546-b7fc-dd3be2b1914c-kube-api-access-vjjnh\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284404 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-ovs-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284451 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0b20496f-a661-4546-b7fc-dd3be2b1914c-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5p2t\" (UniqueName: \"kubernetes.io/projected/d0470720-9420-4c28-9b86-07d2fabd98eb-kube-api-access-n5p2t\") pod \"nmstate-metrics-54757c584b-l4hfh\" (UID: \"d0470720-9420-4c28-9b86-07d2fabd98eb\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284693 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-nmstate-lock\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284732 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-dbus-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.284767 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc5bk\" (UniqueName: \"kubernetes.io/projected/f431be2d-ceb6-4b82-a770-6827a13d1bb1-kube-api-access-vc5bk\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.337269 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 
16:17:14.338003 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.339605 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.340357 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.341776 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-ztl6n" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.354013 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385620 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjjnh\" (UniqueName: \"kubernetes.io/projected/0b20496f-a661-4546-b7fc-dd3be2b1914c-kube-api-access-vjjnh\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385669 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-ovs-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385719 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0b20496f-a661-4546-b7fc-dd3be2b1914c-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385753 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5p2t\" (UniqueName: \"kubernetes.io/projected/d0470720-9420-4c28-9b86-07d2fabd98eb-kube-api-access-n5p2t\") pod \"nmstate-metrics-54757c584b-l4hfh\" (UID: \"d0470720-9420-4c28-9b86-07d2fabd98eb\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385778 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-nmstate-lock\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385796 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-dbus-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385802 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-ovs-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " 
pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.386075 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-nmstate-lock\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.385812 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc5bk\" (UniqueName: \"kubernetes.io/projected/f431be2d-ceb6-4b82-a770-6827a13d1bb1-kube-api-access-vc5bk\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.386341 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f431be2d-ceb6-4b82-a770-6827a13d1bb1-dbus-socket\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.391189 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0b20496f-a661-4546-b7fc-dd3be2b1914c-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.401372 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5p2t\" (UniqueName: \"kubernetes.io/projected/d0470720-9420-4c28-9b86-07d2fabd98eb-kube-api-access-n5p2t\") pod \"nmstate-metrics-54757c584b-l4hfh\" (UID: \"d0470720-9420-4c28-9b86-07d2fabd98eb\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.402233 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjjnh\" (UniqueName: \"kubernetes.io/projected/0b20496f-a661-4546-b7fc-dd3be2b1914c-kube-api-access-vjjnh\") pod \"nmstate-webhook-8474b5b9d8-rv2xr\" (UID: \"0b20496f-a661-4546-b7fc-dd3be2b1914c\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.402740 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc5bk\" (UniqueName: \"kubernetes.io/projected/f431be2d-ceb6-4b82-a770-6827a13d1bb1-kube-api-access-vc5bk\") pod \"nmstate-handler-znmml\" (UID: \"f431be2d-ceb6-4b82-a770-6827a13d1bb1\") " pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.480103 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.487728 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7wml\" (UniqueName: \"kubernetes.io/projected/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-kube-api-access-h7wml\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.487935 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.487984 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.493139 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-b99d66cb5-gc5jr"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.493880 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.533210 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.542851 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-b99d66cb5-gc5jr"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.547986 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590295 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590348 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-trusted-ca-bundle\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590380 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-oauth-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590423 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590454 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590482 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-oauth-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590520 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590542 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqb6x\" (UniqueName: \"kubernetes.io/projected/ef5b391a-db37-4ff4-bf28-22d995f802c9-kube-api-access-zqb6x\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590566 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-h7wml\" (UniqueName: \"kubernetes.io/projected/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-kube-api-access-h7wml\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.590590 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-service-ca\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.592144 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.597555 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.625789 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7wml\" (UniqueName: \"kubernetes.io/projected/d647a337-b4fc-4c83-8bc5-1efc10b5eda5-kube-api-access-h7wml\") pod \"nmstate-console-plugin-7754f76f8b-gl429\" (UID: \"d647a337-b4fc-4c83-8bc5-1efc10b5eda5\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.651019 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.692537 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-oauth-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.692598 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-oauth-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.692633 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.693591 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.692651 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqb6x\" (UniqueName: \"kubernetes.io/projected/ef5b391a-db37-4ff4-bf28-22d995f802c9-kube-api-access-zqb6x\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.693682 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-service-ca\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.693738 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-oauth-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.694598 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-service-ca\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.694659 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " 
pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.694679 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-trusted-ca-bundle\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.696886 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef5b391a-db37-4ff4-bf28-22d995f802c9-trusted-ca-bundle\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.698488 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-oauth-config\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.699283 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef5b391a-db37-4ff4-bf28-22d995f802c9-console-serving-cert\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.723677 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqb6x\" (UniqueName: \"kubernetes.io/projected/ef5b391a-db37-4ff4-bf28-22d995f802c9-kube-api-access-zqb6x\") pod \"console-b99d66cb5-gc5jr\" (UID: \"ef5b391a-db37-4ff4-bf28-22d995f802c9\") " pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.796761 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-l4hfh"] Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.844417 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.872992 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr"] Jan 22 16:17:14 crc kubenswrapper[5032]: W0122 16:17:14.875764 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b20496f_a661_4546_b7fc_dd3be2b1914c.slice/crio-029ea1d4fa02524c4bf293cd2458a286afba16698e67fa2ab91992d2cd898441 WatchSource:0}: Error finding container 029ea1d4fa02524c4bf293cd2458a286afba16698e67fa2ab91992d2cd898441: Status 404 returned error can't find the container with id 029ea1d4fa02524c4bf293cd2458a286afba16698e67fa2ab91992d2cd898441 Jan 22 16:17:14 crc kubenswrapper[5032]: I0122 16:17:14.927148 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429"] Jan 22 16:17:15 crc kubenswrapper[5032]: I0122 16:17:15.297196 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" event={"ID":"d0470720-9420-4c28-9b86-07d2fabd98eb","Type":"ContainerStarted","Data":"ca469e0ea907c769e5418a53b92e066163432a38d0cbf83f518f7608ea10bded"} Jan 22 16:17:15 crc kubenswrapper[5032]: I0122 16:17:15.300920 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" event={"ID":"0b20496f-a661-4546-b7fc-dd3be2b1914c","Type":"ContainerStarted","Data":"029ea1d4fa02524c4bf293cd2458a286afba16698e67fa2ab91992d2cd898441"} Jan 22 16:17:15 crc kubenswrapper[5032]: I0122 16:17:15.305323 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" event={"ID":"d647a337-b4fc-4c83-8bc5-1efc10b5eda5","Type":"ContainerStarted","Data":"f946baec9b6987d8102769737bf115576e713b6e373df257b3def402d9959827"} Jan 22 16:17:15 crc kubenswrapper[5032]: I0122 16:17:15.307498 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-znmml" event={"ID":"f431be2d-ceb6-4b82-a770-6827a13d1bb1","Type":"ContainerStarted","Data":"0bb08a03d045bf5367a08dc1af6c5cbd643b504020b3fc65e322c181f0b75e97"} Jan 22 16:17:15 crc kubenswrapper[5032]: I0122 16:17:15.334766 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-b99d66cb5-gc5jr"] Jan 22 16:17:15 crc kubenswrapper[5032]: W0122 16:17:15.347809 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef5b391a_db37_4ff4_bf28_22d995f802c9.slice/crio-2b9ab271a5ee9e524cf67064675f2e6b3608567dca3b95bce83f5cc704f9cc51 WatchSource:0}: Error finding container 2b9ab271a5ee9e524cf67064675f2e6b3608567dca3b95bce83f5cc704f9cc51: Status 404 returned error can't find the container with id 2b9ab271a5ee9e524cf67064675f2e6b3608567dca3b95bce83f5cc704f9cc51 Jan 22 16:17:16 crc kubenswrapper[5032]: I0122 16:17:16.317231 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-b99d66cb5-gc5jr" event={"ID":"ef5b391a-db37-4ff4-bf28-22d995f802c9","Type":"ContainerStarted","Data":"eb21c3e4e8c449313e9688d3dc26b0822c15beceb88c5d113f4121a9679f59ad"} Jan 22 16:17:16 crc kubenswrapper[5032]: I0122 16:17:16.317784 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-b99d66cb5-gc5jr" 
event={"ID":"ef5b391a-db37-4ff4-bf28-22d995f802c9","Type":"ContainerStarted","Data":"2b9ab271a5ee9e524cf67064675f2e6b3608567dca3b95bce83f5cc704f9cc51"} Jan 22 16:17:16 crc kubenswrapper[5032]: I0122 16:17:16.338035 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-b99d66cb5-gc5jr" podStartSLOduration=2.338012176 podStartE2EDuration="2.338012176s" podCreationTimestamp="2026-01-22 16:17:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:17:16.336095085 +0000 UTC m=+740.148264116" watchObservedRunningTime="2026-01-22 16:17:16.338012176 +0000 UTC m=+740.150181207" Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.341734 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" event={"ID":"d647a337-b4fc-4c83-8bc5-1efc10b5eda5","Type":"ContainerStarted","Data":"8f69a817cf9260a94c22e9574b35298c9faa8c6516b313c5092ea6c037187880"} Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.344960 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-znmml" event={"ID":"f431be2d-ceb6-4b82-a770-6827a13d1bb1","Type":"ContainerStarted","Data":"d0f1cb7e930fa32c32eb8775fdbff6682d964edb42ddbaf57bf56e7e66d2cb68"} Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.345181 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.347061 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" event={"ID":"d0470720-9420-4c28-9b86-07d2fabd98eb","Type":"ContainerStarted","Data":"6b6ebb6781f90fea9ea226fc1fa125755c64c36e115a9ca4c54dc7a8b54e2caa"} Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.349272 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" event={"ID":"0b20496f-a661-4546-b7fc-dd3be2b1914c","Type":"ContainerStarted","Data":"6bdaf592a47b7b5267da67ed4b5f09f41c558d7b07b17c7341191343662eb77c"} Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.349560 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.376362 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-gl429" podStartSLOduration=1.71433517 podStartE2EDuration="4.376329252s" podCreationTimestamp="2026-01-22 16:17:14 +0000 UTC" firstStartedPulling="2026-01-22 16:17:14.943125993 +0000 UTC m=+738.755295024" lastFinishedPulling="2026-01-22 16:17:17.605120045 +0000 UTC m=+741.417289106" observedRunningTime="2026-01-22 16:17:18.36616391 +0000 UTC m=+742.178332941" watchObservedRunningTime="2026-01-22 16:17:18.376329252 +0000 UTC m=+742.188498313" Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.429912 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-znmml" podStartSLOduration=1.4867629949999999 podStartE2EDuration="4.429884367s" podCreationTimestamp="2026-01-22 16:17:14 +0000 UTC" firstStartedPulling="2026-01-22 16:17:14.678254638 +0000 UTC m=+738.490423669" lastFinishedPulling="2026-01-22 16:17:17.62137601 +0000 UTC m=+741.433545041" observedRunningTime="2026-01-22 16:17:18.401377153 
+0000 UTC m=+742.213546244" watchObservedRunningTime="2026-01-22 16:17:18.429884367 +0000 UTC m=+742.242053438" Jan 22 16:17:18 crc kubenswrapper[5032]: I0122 16:17:18.440640 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" podStartSLOduration=1.707784075 podStartE2EDuration="4.440625344s" podCreationTimestamp="2026-01-22 16:17:14 +0000 UTC" firstStartedPulling="2026-01-22 16:17:14.87841034 +0000 UTC m=+738.690579381" lastFinishedPulling="2026-01-22 16:17:17.611251609 +0000 UTC m=+741.423420650" observedRunningTime="2026-01-22 16:17:18.429698022 +0000 UTC m=+742.241867063" watchObservedRunningTime="2026-01-22 16:17:18.440625344 +0000 UTC m=+742.252794385" Jan 22 16:17:21 crc kubenswrapper[5032]: I0122 16:17:21.372582 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" event={"ID":"d0470720-9420-4c28-9b86-07d2fabd98eb","Type":"ContainerStarted","Data":"d275e1a029dc7e9efe39ab3d6b66d3d4a6a3911b3d5234accb0dd7eef1490b99"} Jan 22 16:17:21 crc kubenswrapper[5032]: I0122 16:17:21.396303 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-l4hfh" podStartSLOduration=1.9402152 podStartE2EDuration="7.396277992s" podCreationTimestamp="2026-01-22 16:17:14 +0000 UTC" firstStartedPulling="2026-01-22 16:17:14.803289188 +0000 UTC m=+738.615458219" lastFinishedPulling="2026-01-22 16:17:20.25935198 +0000 UTC m=+744.071521011" observedRunningTime="2026-01-22 16:17:21.389761448 +0000 UTC m=+745.201930509" watchObservedRunningTime="2026-01-22 16:17:21.396277992 +0000 UTC m=+745.208447033" Jan 22 16:17:24 crc kubenswrapper[5032]: I0122 16:17:24.584047 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-znmml" Jan 22 16:17:24 crc kubenswrapper[5032]: I0122 16:17:24.845938 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:24 crc kubenswrapper[5032]: I0122 16:17:24.846287 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:24 crc kubenswrapper[5032]: I0122 16:17:24.852905 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:25 crc kubenswrapper[5032]: I0122 16:17:25.410258 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-b99d66cb5-gc5jr" Jan 22 16:17:25 crc kubenswrapper[5032]: I0122 16:17:25.480390 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:17:26 crc kubenswrapper[5032]: I0122 16:17:26.462245 5032 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 22 16:17:33 crc kubenswrapper[5032]: I0122 16:17:33.016982 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:17:33 crc kubenswrapper[5032]: I0122 16:17:33.019222 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:17:34 crc kubenswrapper[5032]: I0122 16:17:34.543811 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-rv2xr" Jan 22 16:17:50 crc kubenswrapper[5032]: I0122 16:17:50.547663 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-w7mns" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerName="console" containerID="cri-o://9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc" gracePeriod=15 Jan 22 16:17:50 crc kubenswrapper[5032]: I0122 16:17:50.980385 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-w7mns_b3b48fcd-6341-4599-ae1e-f424e4a24f55/console/0.log" Jan 22 16:17:50 crc kubenswrapper[5032]: I0122 16:17:50.980996 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.039385 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg"] Jan 22 16:17:51 crc kubenswrapper[5032]: E0122 16:17:51.039660 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerName="console" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.039678 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerName="console" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.039801 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerName="console" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.040765 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.043224 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.056794 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg"] Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.064578 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.064722 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.064824 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.064950 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.064998 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.065156 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.065212 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvzhv\" (UniqueName: \"kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv\") pod \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\" (UID: \"b3b48fcd-6341-4599-ae1e-f424e4a24f55\") " Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.065993 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.066014 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca" (OuterVolumeSpecName: "service-ca") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.066341 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.066357 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config" (OuterVolumeSpecName: "console-config") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.074425 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.074774 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv" (OuterVolumeSpecName: "kube-api-access-gvzhv") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "kube-api-access-gvzhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.074786 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "b3b48fcd-6341-4599-ae1e-f424e4a24f55" (UID: "b3b48fcd-6341-4599-ae1e-f424e4a24f55"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166475 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p76r\" (UniqueName: \"kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166536 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166693 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166942 5032 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166973 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvzhv\" (UniqueName: \"kubernetes.io/projected/b3b48fcd-6341-4599-ae1e-f424e4a24f55-kube-api-access-gvzhv\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166988 5032 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.166998 5032 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.167008 5032 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b3b48fcd-6341-4599-ae1e-f424e4a24f55-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.167018 5032 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.167026 5032 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3b48fcd-6341-4599-ae1e-f424e4a24f55-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.268801 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.268915 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.269100 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p76r\" (UniqueName: \"kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.269947 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.269981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.300260 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p76r\" (UniqueName: \"kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.361911 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611303 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-w7mns_b3b48fcd-6341-4599-ae1e-f424e4a24f55/console/0.log" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611624 5032 generic.go:334] "Generic (PLEG): container finished" podID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" containerID="9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc" exitCode=2 Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611666 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-w7mns" event={"ID":"b3b48fcd-6341-4599-ae1e-f424e4a24f55","Type":"ContainerDied","Data":"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc"} Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611711 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-w7mns" event={"ID":"b3b48fcd-6341-4599-ae1e-f424e4a24f55","Type":"ContainerDied","Data":"1b194b15fcf312b4877c33f9a441b87ec87f0a76c3cc6b1d484216cfb79c9663"} Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611738 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-w7mns" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.611757 5032 scope.go:117] "RemoveContainer" containerID="9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.646020 5032 scope.go:117] "RemoveContainer" containerID="9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc" Jan 22 16:17:51 crc kubenswrapper[5032]: E0122 16:17:51.646599 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc\": container with ID starting with 9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc not found: ID does not exist" containerID="9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.646654 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc"} err="failed to get container status \"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc\": rpc error: code = NotFound desc = could not find container \"9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc\": container with ID starting with 9378970fdb291429b1fc18a93037acc5fe6ba9e20e2efd1951e1dff8d40749cc not found: ID does not exist" Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.650682 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.653652 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-w7mns"] Jan 22 16:17:51 crc kubenswrapper[5032]: I0122 16:17:51.682601 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg"] Jan 22 16:17:51 crc kubenswrapper[5032]: W0122 16:17:51.692041 5032 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b3f8a94_b262_4f1b_b42a_5cea8fad124b.slice/crio-fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3 WatchSource:0}: Error finding container fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3: Status 404 returned error can't find the container with id fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3 Jan 22 16:17:52 crc kubenswrapper[5032]: I0122 16:17:52.465059 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3b48fcd-6341-4599-ae1e-f424e4a24f55" path="/var/lib/kubelet/pods/b3b48fcd-6341-4599-ae1e-f424e4a24f55/volumes" Jan 22 16:17:52 crc kubenswrapper[5032]: I0122 16:17:52.625176 5032 generic.go:334] "Generic (PLEG): container finished" podID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerID="e9ddbae7eb3419ecdef72dd619adb7d6ef410d3b0f73911ce710e195d1dcd868" exitCode=0 Jan 22 16:17:52 crc kubenswrapper[5032]: I0122 16:17:52.625262 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" event={"ID":"7b3f8a94-b262-4f1b-b42a-5cea8fad124b","Type":"ContainerDied","Data":"e9ddbae7eb3419ecdef72dd619adb7d6ef410d3b0f73911ce710e195d1dcd868"} Jan 22 16:17:52 crc kubenswrapper[5032]: I0122 16:17:52.625306 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" event={"ID":"7b3f8a94-b262-4f1b-b42a-5cea8fad124b","Type":"ContainerStarted","Data":"fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3"} Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.591718 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.594089 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.613725 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.651198 5032 generic.go:334] "Generic (PLEG): container finished" podID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerID="76f143d6676a047d6b22032674a3ecd6686a809aac3798bd7c7366d5ec6c3c89" exitCode=0 Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.651354 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" event={"ID":"7b3f8a94-b262-4f1b-b42a-5cea8fad124b","Type":"ContainerDied","Data":"76f143d6676a047d6b22032674a3ecd6686a809aac3798bd7c7366d5ec6c3c89"} Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.724891 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlmvs\" (UniqueName: \"kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.724985 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.725051 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.826703 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlmvs\" (UniqueName: \"kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.826790 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.826852 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.827534 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content\") pod \"redhat-operators-cpn4v\" (UID: 
\"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.827550 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.854004 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlmvs\" (UniqueName: \"kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs\") pod \"redhat-operators-cpn4v\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:54 crc kubenswrapper[5032]: I0122 16:17:54.952089 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.396958 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.658284 5032 generic.go:334] "Generic (PLEG): container finished" podID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerID="7f06bcea6b2cb9403ec5fc9a6d259217619196e35b02445519680704615755e4" exitCode=0 Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.658390 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerDied","Data":"7f06bcea6b2cb9403ec5fc9a6d259217619196e35b02445519680704615755e4"} Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.658681 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerStarted","Data":"a712cd33583b8e5fdc9399d8ffa58f4b363054d17815130632601b1c0d7a71d7"} Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.660672 5032 generic.go:334] "Generic (PLEG): container finished" podID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerID="084f468f384540d982130748e4b646efe3108732f4c785a0b8e80f778b48e908" exitCode=0 Jan 22 16:17:55 crc kubenswrapper[5032]: I0122 16:17:55.660753 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" event={"ID":"7b3f8a94-b262-4f1b-b42a-5cea8fad124b","Type":"ContainerDied","Data":"084f468f384540d982130748e4b646efe3108732f4c785a0b8e80f778b48e908"} Jan 22 16:17:56 crc kubenswrapper[5032]: I0122 16:17:56.676449 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerStarted","Data":"2e3bfd26878a253cdf7d8a2d48e3693eed6196938a3c275e6bc3899ef933c8ef"} Jan 22 16:17:56 crc kubenswrapper[5032]: I0122 16:17:56.942103 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.060460 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle\") pod \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.060534 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util\") pod \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.060685 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p76r\" (UniqueName: \"kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r\") pod \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\" (UID: \"7b3f8a94-b262-4f1b-b42a-5cea8fad124b\") " Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.062327 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle" (OuterVolumeSpecName: "bundle") pod "7b3f8a94-b262-4f1b-b42a-5cea8fad124b" (UID: "7b3f8a94-b262-4f1b-b42a-5cea8fad124b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.067637 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r" (OuterVolumeSpecName: "kube-api-access-8p76r") pod "7b3f8a94-b262-4f1b-b42a-5cea8fad124b" (UID: "7b3f8a94-b262-4f1b-b42a-5cea8fad124b"). InnerVolumeSpecName "kube-api-access-8p76r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.074672 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util" (OuterVolumeSpecName: "util") pod "7b3f8a94-b262-4f1b-b42a-5cea8fad124b" (UID: "7b3f8a94-b262-4f1b-b42a-5cea8fad124b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.162651 5032 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.163030 5032 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-util\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.163171 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p76r\" (UniqueName: \"kubernetes.io/projected/7b3f8a94-b262-4f1b-b42a-5cea8fad124b-kube-api-access-8p76r\") on node \"crc\" DevicePath \"\"" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.689419 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" event={"ID":"7b3f8a94-b262-4f1b-b42a-5cea8fad124b","Type":"ContainerDied","Data":"fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3"} Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.689466 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd145a6dd539d49edeb9b502605c19006057861bcf6793ea06d376af078ab2d3" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.689525 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg" Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.695078 5032 generic.go:334] "Generic (PLEG): container finished" podID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerID="2e3bfd26878a253cdf7d8a2d48e3693eed6196938a3c275e6bc3899ef933c8ef" exitCode=0 Jan 22 16:17:57 crc kubenswrapper[5032]: I0122 16:17:57.695159 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerDied","Data":"2e3bfd26878a253cdf7d8a2d48e3693eed6196938a3c275e6bc3899ef933c8ef"} Jan 22 16:17:58 crc kubenswrapper[5032]: I0122 16:17:58.705804 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerStarted","Data":"6fff7babb217a942b6707ba516a47b0ef4cec73387b27fe58677255a9d068c64"} Jan 22 16:17:58 crc kubenswrapper[5032]: I0122 16:17:58.734703 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cpn4v" podStartSLOduration=2.283184398 podStartE2EDuration="4.734675898s" podCreationTimestamp="2026-01-22 16:17:54 +0000 UTC" firstStartedPulling="2026-01-22 16:17:55.660486681 +0000 UTC m=+779.472655712" lastFinishedPulling="2026-01-22 16:17:58.111978161 +0000 UTC m=+781.924147212" observedRunningTime="2026-01-22 16:17:58.727617107 +0000 UTC m=+782.539786178" watchObservedRunningTime="2026-01-22 16:17:58.734675898 +0000 UTC m=+782.546844969" Jan 22 16:18:03 crc kubenswrapper[5032]: I0122 16:18:03.016647 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:18:03 crc 
kubenswrapper[5032]: I0122 16:18:03.017290 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:18:03 crc kubenswrapper[5032]: I0122 16:18:03.017350 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:18:03 crc kubenswrapper[5032]: I0122 16:18:03.017975 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:18:03 crc kubenswrapper[5032]: I0122 16:18:03.018029 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd" gracePeriod=600 Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.758448 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd"} Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.759059 5032 scope.go:117] "RemoveContainer" containerID="3d9ae4c80605a2263a3c38f7ff610faacbf3ed02dfd61676693b0cc5bc82a3c4" Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.758074 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd" exitCode=0 Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.759341 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f"} Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.953134 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:04 crc kubenswrapper[5032]: I0122 16:18:04.953600 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:05 crc kubenswrapper[5032]: I0122 16:18:05.012490 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:05 crc kubenswrapper[5032]: I0122 16:18:05.823296 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.092260 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8"] Jan 22 16:18:07 crc kubenswrapper[5032]: E0122 16:18:07.092906 5032 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="extract" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.092921 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="extract" Jan 22 16:18:07 crc kubenswrapper[5032]: E0122 16:18:07.092940 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="util" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.092949 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="util" Jan 22 16:18:07 crc kubenswrapper[5032]: E0122 16:18:07.092958 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="pull" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.092965 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="pull" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.093069 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b3f8a94-b262-4f1b-b42a-5cea8fad124b" containerName="extract" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.093566 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.101423 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.101707 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6mdcr" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.102313 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.102630 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.103976 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.121834 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8"] Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.216548 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-apiservice-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.216598 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tvp6\" (UniqueName: \"kubernetes.io/projected/2e6a0f38-95b2-48f5-80f9-612c5508912d-kube-api-access-2tvp6\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.216634 
5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-webhook-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.318563 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-apiservice-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.318630 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tvp6\" (UniqueName: \"kubernetes.io/projected/2e6a0f38-95b2-48f5-80f9-612c5508912d-kube-api-access-2tvp6\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.318667 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-webhook-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.326288 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-apiservice-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.335881 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tvp6\" (UniqueName: \"kubernetes.io/projected/2e6a0f38-95b2-48f5-80f9-612c5508912d-kube-api-access-2tvp6\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.338103 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e6a0f38-95b2-48f5-80f9-612c5508912d-webhook-cert\") pod \"metallb-operator-controller-manager-79794f8d77-hg8k8\" (UID: \"2e6a0f38-95b2-48f5-80f9-612c5508912d\") " pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.408834 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.442601 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn"] Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.446447 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.451348 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.451543 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.451741 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-6l4n2" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.476949 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn"] Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.521611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-apiservice-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.521661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-webhook-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.521703 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nqjp\" (UniqueName: \"kubernetes.io/projected/8dc54e54-b180-487b-b1d9-270516457d4f-kube-api-access-6nqjp\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.622644 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-apiservice-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.622706 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-webhook-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.622747 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nqjp\" (UniqueName: \"kubernetes.io/projected/8dc54e54-b180-487b-b1d9-270516457d4f-kube-api-access-6nqjp\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 
16:18:07.629458 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-apiservice-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.636449 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8dc54e54-b180-487b-b1d9-270516457d4f-webhook-cert\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.657677 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nqjp\" (UniqueName: \"kubernetes.io/projected/8dc54e54-b180-487b-b1d9-270516457d4f-kube-api-access-6nqjp\") pod \"metallb-operator-webhook-server-7c755f5864-r5hgn\" (UID: \"8dc54e54-b180-487b-b1d9-270516457d4f\") " pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.793471 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:07 crc kubenswrapper[5032]: I0122 16:18:07.821311 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8"] Jan 22 16:18:08 crc kubenswrapper[5032]: I0122 16:18:08.045012 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn"] Jan 22 16:18:08 crc kubenswrapper[5032]: I0122 16:18:08.172763 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:18:08 crc kubenswrapper[5032]: I0122 16:18:08.173182 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cpn4v" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="registry-server" containerID="cri-o://6fff7babb217a942b6707ba516a47b0ef4cec73387b27fe58677255a9d068c64" gracePeriod=2 Jan 22 16:18:08 crc kubenswrapper[5032]: I0122 16:18:08.787286 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" event={"ID":"2e6a0f38-95b2-48f5-80f9-612c5508912d","Type":"ContainerStarted","Data":"c69a65a69ad0b310ae59a63b92003268eaa96fca13a9df66b48443be3a3c1745"} Jan 22 16:18:08 crc kubenswrapper[5032]: I0122 16:18:08.789170 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" event={"ID":"8dc54e54-b180-487b-b1d9-270516457d4f","Type":"ContainerStarted","Data":"54c74171f57f091eca839cadfe4dc222ff5b2e3cc4dc513afb9028f59c7b80e9"} Jan 22 16:18:09 crc kubenswrapper[5032]: I0122 16:18:09.800950 5032 generic.go:334] "Generic (PLEG): container finished" podID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerID="6fff7babb217a942b6707ba516a47b0ef4cec73387b27fe58677255a9d068c64" exitCode=0 Jan 22 16:18:09 crc kubenswrapper[5032]: I0122 16:18:09.801048 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" 
event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerDied","Data":"6fff7babb217a942b6707ba516a47b0ef4cec73387b27fe58677255a9d068c64"} Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.614274 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.678157 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlmvs\" (UniqueName: \"kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs\") pod \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.678228 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities\") pod \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.678477 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content\") pod \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\" (UID: \"e17f6b27-9d46-42fb-a3cc-5888d1a7d122\") " Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.680621 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities" (OuterVolumeSpecName: "utilities") pod "e17f6b27-9d46-42fb-a3cc-5888d1a7d122" (UID: "e17f6b27-9d46-42fb-a3cc-5888d1a7d122"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.688550 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs" (OuterVolumeSpecName: "kube-api-access-dlmvs") pod "e17f6b27-9d46-42fb-a3cc-5888d1a7d122" (UID: "e17f6b27-9d46-42fb-a3cc-5888d1a7d122"). InnerVolumeSpecName "kube-api-access-dlmvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.780652 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlmvs\" (UniqueName: \"kubernetes.io/projected/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-kube-api-access-dlmvs\") on node \"crc\" DevicePath \"\"" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.780688 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.811551 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e17f6b27-9d46-42fb-a3cc-5888d1a7d122" (UID: "e17f6b27-9d46-42fb-a3cc-5888d1a7d122"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.817060 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cpn4v" event={"ID":"e17f6b27-9d46-42fb-a3cc-5888d1a7d122","Type":"ContainerDied","Data":"a712cd33583b8e5fdc9399d8ffa58f4b363054d17815130632601b1c0d7a71d7"} Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.817112 5032 scope.go:117] "RemoveContainer" containerID="6fff7babb217a942b6707ba516a47b0ef4cec73387b27fe58677255a9d068c64" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.817230 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cpn4v" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.852065 5032 scope.go:117] "RemoveContainer" containerID="2e3bfd26878a253cdf7d8a2d48e3693eed6196938a3c275e6bc3899ef933c8ef" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.858668 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.875971 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cpn4v"] Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.882774 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e17f6b27-9d46-42fb-a3cc-5888d1a7d122-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:18:10 crc kubenswrapper[5032]: I0122 16:18:10.904671 5032 scope.go:117] "RemoveContainer" containerID="7f06bcea6b2cb9403ec5fc9a6d259217619196e35b02445519680704615755e4" Jan 22 16:18:12 crc kubenswrapper[5032]: I0122 16:18:12.462138 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" path="/var/lib/kubelet/pods/e17f6b27-9d46-42fb-a3cc-5888d1a7d122/volumes" Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.850896 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" event={"ID":"8dc54e54-b180-487b-b1d9-270516457d4f","Type":"ContainerStarted","Data":"9decfa5ede4f0340fabc9707a449709e3d3e4c6302400bc941aea1ad6b58ff8b"} Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.851966 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.852698 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" event={"ID":"2e6a0f38-95b2-48f5-80f9-612c5508912d","Type":"ContainerStarted","Data":"710def8700de51234ad02c07d001ecf51d90132c51693a279c08141e8fee1e59"} Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.852840 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.872326 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" podStartSLOduration=1.3234532589999999 podStartE2EDuration="6.872301932s" podCreationTimestamp="2026-01-22 16:18:07 +0000 UTC" firstStartedPulling="2026-01-22 16:18:08.062653211 +0000 UTC m=+791.874822242" lastFinishedPulling="2026-01-22 16:18:13.611501864 +0000 UTC m=+797.423670915" 
observedRunningTime="2026-01-22 16:18:13.868697859 +0000 UTC m=+797.680866900" watchObservedRunningTime="2026-01-22 16:18:13.872301932 +0000 UTC m=+797.684470973" Jan 22 16:18:13 crc kubenswrapper[5032]: I0122 16:18:13.888827 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" podStartSLOduration=1.133627896 podStartE2EDuration="6.888802637s" podCreationTimestamp="2026-01-22 16:18:07 +0000 UTC" firstStartedPulling="2026-01-22 16:18:07.83250492 +0000 UTC m=+791.644673951" lastFinishedPulling="2026-01-22 16:18:13.587679661 +0000 UTC m=+797.399848692" observedRunningTime="2026-01-22 16:18:13.883955722 +0000 UTC m=+797.696124783" watchObservedRunningTime="2026-01-22 16:18:13.888802637 +0000 UTC m=+797.700971668" Jan 22 16:18:27 crc kubenswrapper[5032]: I0122 16:18:27.799815 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7c755f5864-r5hgn" Jan 22 16:18:47 crc kubenswrapper[5032]: I0122 16:18:47.412134 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-79794f8d77-hg8k8" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.136233 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l"] Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.136566 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="extract-utilities" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.136595 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="extract-utilities" Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.136611 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="extract-content" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.136620 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="extract-content" Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.136654 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="registry-server" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.136664 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="registry-server" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.136805 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="e17f6b27-9d46-42fb-a3cc-5888d1a7d122" containerName="registry-server" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.137395 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.144346 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-zvksk" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.144358 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.152538 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-bkbn9"] Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.158837 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.160595 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l"] Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.161801 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.163313 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.221943 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-conf\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222039 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9qtp\" (UniqueName: \"kubernetes.io/projected/e4695925-6a14-469e-848d-4f4c05601c65-kube-api-access-h9qtp\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222084 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-metrics\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222139 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-reloader\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222355 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222425 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-495zd\" (UniqueName: \"kubernetes.io/projected/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-kube-api-access-495zd\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: 
\"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222543 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e4695925-6a14-469e-848d-4f4c05601c65-frr-startup\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222599 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.222657 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-sockets\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.243050 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-nhgx7"] Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.243998 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.251213 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.251225 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.251442 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.251496 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-pg5s2" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.268752 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-pskfs"] Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.270523 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.272137 5032 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.295039 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-pskfs"] Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.323931 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-conf\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.323999 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9qtp\" (UniqueName: \"kubernetes.io/projected/e4695925-6a14-469e-848d-4f4c05601c65-kube-api-access-h9qtp\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324025 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-metrics\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324046 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-cert\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324074 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvrbc\" (UniqueName: \"kubernetes.io/projected/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-kube-api-access-xvrbc\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324103 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324123 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324144 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-reloader\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324170 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-2tcc4\" (UniqueName: \"kubernetes.io/projected/8c978f67-ff6a-436c-b801-c7d88a0550e7-kube-api-access-2tcc4\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324189 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324208 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8c978f67-ff6a-436c-b801-c7d88a0550e7-metallb-excludel2\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324228 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-495zd\" (UniqueName: \"kubernetes.io/projected/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-kube-api-access-495zd\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324250 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-metrics-certs\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e4695925-6a14-469e-848d-4f4c05601c65-frr-startup\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324295 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324315 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-sockets\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324739 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-conf\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.324750 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-metrics\") pod \"frr-k8s-bkbn9\" (UID: 
\"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.324988 5032 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.325078 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert podName:42fa9b50-fc10-4e57-a693-9b3d41a01ebe nodeName:}" failed. No retries permitted until 2026-01-22 16:18:48.825049614 +0000 UTC m=+832.637218645 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert") pod "frr-k8s-webhook-server-7df86c4f6c-9ms7l" (UID: "42fa9b50-fc10-4e57-a693-9b3d41a01ebe") : secret "frr-k8s-webhook-server-cert" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.325553 5032 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.325598 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs podName:e4695925-6a14-469e-848d-4f4c05601c65 nodeName:}" failed. No retries permitted until 2026-01-22 16:18:48.825587928 +0000 UTC m=+832.637757209 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs") pod "frr-k8s-bkbn9" (UID: "e4695925-6a14-469e-848d-4f4c05601c65") : secret "frr-k8s-certs-secret" not found Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.326097 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e4695925-6a14-469e-848d-4f4c05601c65-frr-startup\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.328147 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-reloader\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.328157 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e4695925-6a14-469e-848d-4f4c05601c65-frr-sockets\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.360683 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9qtp\" (UniqueName: \"kubernetes.io/projected/e4695925-6a14-469e-848d-4f4c05601c65-kube-api-access-h9qtp\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.366356 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-495zd\" (UniqueName: \"kubernetes.io/projected/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-kube-api-access-495zd\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.426212 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-cert\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.426301 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvrbc\" (UniqueName: \"kubernetes.io/projected/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-kube-api-access-xvrbc\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.426349 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.426412 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.426441 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tcc4\" (UniqueName: \"kubernetes.io/projected/8c978f67-ff6a-436c-b801-c7d88a0550e7-kube-api-access-2tcc4\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.427061 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8c978f67-ff6a-436c-b801-c7d88a0550e7-metallb-excludel2\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.426591 5032 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.427145 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist podName:8c978f67-ff6a-436c-b801-c7d88a0550e7 nodeName:}" failed. No retries permitted until 2026-01-22 16:18:48.92712244 +0000 UTC m=+832.739291461 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist") pod "speaker-nhgx7" (UID: "8c978f67-ff6a-436c-b801-c7d88a0550e7") : secret "metallb-memberlist" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.426635 5032 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.427323 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs podName:8c978f67-ff6a-436c-b801-c7d88a0550e7 nodeName:}" failed. 
No retries permitted until 2026-01-22 16:18:48.927315785 +0000 UTC m=+832.739484816 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs") pod "speaker-nhgx7" (UID: "8c978f67-ff6a-436c-b801-c7d88a0550e7") : secret "speaker-certs-secret" not found Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.427387 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-metrics-certs\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.428033 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8c978f67-ff6a-436c-b801-c7d88a0550e7-metallb-excludel2\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.430634 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-cert\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.430809 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-metrics-certs\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.451348 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvrbc\" (UniqueName: \"kubernetes.io/projected/86cb2f0d-5cf4-438d-9bd2-90e26f223d16-kube-api-access-xvrbc\") pod \"controller-6968d8fdc4-pskfs\" (UID: \"86cb2f0d-5cf4-438d-9bd2-90e26f223d16\") " pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.453331 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tcc4\" (UniqueName: \"kubernetes.io/projected/8c978f67-ff6a-436c-b801-c7d88a0550e7-kube-api-access-2tcc4\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.599547 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.834444 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.835136 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.840422 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42fa9b50-fc10-4e57-a693-9b3d41a01ebe-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9ms7l\" (UID: \"42fa9b50-fc10-4e57-a693-9b3d41a01ebe\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.840735 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4695925-6a14-469e-848d-4f4c05601c65-metrics-certs\") pod \"frr-k8s-bkbn9\" (UID: \"e4695925-6a14-469e-848d-4f4c05601c65\") " pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.940167 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.940234 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.941010 5032 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 22 16:18:48 crc kubenswrapper[5032]: E0122 16:18:48.941143 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist podName:8c978f67-ff6a-436c-b801-c7d88a0550e7 nodeName:}" failed. No retries permitted until 2026-01-22 16:18:49.941108801 +0000 UTC m=+833.753278012 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist") pod "speaker-nhgx7" (UID: "8c978f67-ff6a-436c-b801-c7d88a0550e7") : secret "metallb-memberlist" not found Jan 22 16:18:48 crc kubenswrapper[5032]: I0122 16:18:48.948367 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-metrics-certs\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.062842 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-pskfs"] Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.063313 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.081614 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-pskfs" event={"ID":"86cb2f0d-5cf4-438d-9bd2-90e26f223d16","Type":"ContainerStarted","Data":"a8e1c8632fa1b7ad44915828a314e0e85dd1615c0ea564b0b61393f8a7a654a5"} Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.084433 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.347958 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l"] Jan 22 16:18:49 crc kubenswrapper[5032]: W0122 16:18:49.357346 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42fa9b50_fc10_4e57_a693_9b3d41a01ebe.slice/crio-d4baa12f09336b2691f8420d3a72d33eda3d59318593f71bd4b26dcd47e61de9 WatchSource:0}: Error finding container d4baa12f09336b2691f8420d3a72d33eda3d59318593f71bd4b26dcd47e61de9: Status 404 returned error can't find the container with id d4baa12f09336b2691f8420d3a72d33eda3d59318593f71bd4b26dcd47e61de9 Jan 22 16:18:49 crc kubenswrapper[5032]: I0122 16:18:49.957079 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:49 crc kubenswrapper[5032]: E0122 16:18:49.957328 5032 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 22 16:18:49 crc kubenswrapper[5032]: E0122 16:18:49.959266 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist podName:8c978f67-ff6a-436c-b801-c7d88a0550e7 nodeName:}" failed. No retries permitted until 2026-01-22 16:18:51.95920835 +0000 UTC m=+835.771377401 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist") pod "speaker-nhgx7" (UID: "8c978f67-ff6a-436c-b801-c7d88a0550e7") : secret "metallb-memberlist" not found Jan 22 16:18:50 crc kubenswrapper[5032]: I0122 16:18:50.094272 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" event={"ID":"42fa9b50-fc10-4e57-a693-9b3d41a01ebe","Type":"ContainerStarted","Data":"d4baa12f09336b2691f8420d3a72d33eda3d59318593f71bd4b26dcd47e61de9"} Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.108350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"fc729a267a7ee628ef61c535496a2a63aa78b01b1100b5bf90d3613d20e069a4"} Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.116736 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-pskfs" event={"ID":"86cb2f0d-5cf4-438d-9bd2-90e26f223d16","Type":"ContainerStarted","Data":"255a7bdf4a5a9857b7e21bf7d9ee6905a18245ef3f873695d2a2039083f395a4"} Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.116812 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-pskfs" event={"ID":"86cb2f0d-5cf4-438d-9bd2-90e26f223d16","Type":"ContainerStarted","Data":"1f3bbf329aeaf068f21d566467ebc92a828a2ba6abfe60fd1e400b883cfab6e7"} Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.116919 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.141927 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-pskfs" podStartSLOduration=3.141905101 podStartE2EDuration="3.141905101s" podCreationTimestamp="2026-01-22 16:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:18:51.139964161 +0000 UTC m=+834.952133202" watchObservedRunningTime="2026-01-22 16:18:51.141905101 +0000 UTC m=+834.954074132" Jan 22 16:18:51 crc kubenswrapper[5032]: I0122 16:18:51.989443 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:52 crc kubenswrapper[5032]: I0122 16:18:52.008901 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8c978f67-ff6a-436c-b801-c7d88a0550e7-memberlist\") pod \"speaker-nhgx7\" (UID: \"8c978f67-ff6a-436c-b801-c7d88a0550e7\") " pod="metallb-system/speaker-nhgx7" Jan 22 16:18:52 crc kubenswrapper[5032]: I0122 16:18:52.162363 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-nhgx7" Jan 22 16:18:53 crc kubenswrapper[5032]: I0122 16:18:53.140609 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nhgx7" event={"ID":"8c978f67-ff6a-436c-b801-c7d88a0550e7","Type":"ContainerStarted","Data":"9c5d5b3547dab6fcdc575aaf161376259914561b8adb74c5a664e545beda7891"} Jan 22 16:18:53 crc kubenswrapper[5032]: I0122 16:18:53.141154 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nhgx7" event={"ID":"8c978f67-ff6a-436c-b801-c7d88a0550e7","Type":"ContainerStarted","Data":"a976840792175a87b8fadddfedbcb67139417e12a2fc289d1eb5046fec65ad3e"} Jan 22 16:18:53 crc kubenswrapper[5032]: I0122 16:18:53.141189 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nhgx7" event={"ID":"8c978f67-ff6a-436c-b801-c7d88a0550e7","Type":"ContainerStarted","Data":"7905cf92e958c26eec0c8f65b93f59fe62878722c0924912bdfd5c906a73cafb"} Jan 22 16:18:53 crc kubenswrapper[5032]: I0122 16:18:53.141372 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-nhgx7" Jan 22 16:18:53 crc kubenswrapper[5032]: I0122 16:18:53.171369 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-nhgx7" podStartSLOduration=5.171347525 podStartE2EDuration="5.171347525s" podCreationTimestamp="2026-01-22 16:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:18:53.167025224 +0000 UTC m=+836.979194255" watchObservedRunningTime="2026-01-22 16:18:53.171347525 +0000 UTC m=+836.983516556" Jan 22 16:19:01 crc kubenswrapper[5032]: I0122 16:19:01.196616 5032 generic.go:334] "Generic (PLEG): container finished" podID="e4695925-6a14-469e-848d-4f4c05601c65" containerID="b6cb691f93bfec5c5626e3ebece27e839010c0537767cf4d00886855009ec934" exitCode=0 Jan 22 16:19:01 crc kubenswrapper[5032]: I0122 16:19:01.196681 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerDied","Data":"b6cb691f93bfec5c5626e3ebece27e839010c0537767cf4d00886855009ec934"} Jan 22 16:19:01 crc kubenswrapper[5032]: I0122 16:19:01.199339 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" event={"ID":"42fa9b50-fc10-4e57-a693-9b3d41a01ebe","Type":"ContainerStarted","Data":"a541ab5107b6a1a74127667f074c6c5c706607d74bcb1dcc92bea27bf8addcc8"} Jan 22 16:19:01 crc kubenswrapper[5032]: I0122 16:19:01.199801 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:19:01 crc kubenswrapper[5032]: I0122 16:19:01.247137 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" podStartSLOduration=1.7404859959999999 podStartE2EDuration="13.247114537s" podCreationTimestamp="2026-01-22 16:18:48 +0000 UTC" firstStartedPulling="2026-01-22 16:18:49.360986602 +0000 UTC m=+833.173155643" lastFinishedPulling="2026-01-22 16:19:00.867615153 +0000 UTC m=+844.679784184" observedRunningTime="2026-01-22 16:19:01.243702074 +0000 UTC m=+845.055871125" watchObservedRunningTime="2026-01-22 16:19:01.247114537 +0000 UTC m=+845.059283568" Jan 22 16:19:02 crc kubenswrapper[5032]: I0122 16:19:02.167069 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="metallb-system/speaker-nhgx7" Jan 22 16:19:02 crc kubenswrapper[5032]: I0122 16:19:02.207509 5032 generic.go:334] "Generic (PLEG): container finished" podID="e4695925-6a14-469e-848d-4f4c05601c65" containerID="b0157b171a4aa58ce69a518b30971287b779f61c032827d0b422511e70f82b63" exitCode=0 Jan 22 16:19:02 crc kubenswrapper[5032]: I0122 16:19:02.207569 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerDied","Data":"b0157b171a4aa58ce69a518b30971287b779f61c032827d0b422511e70f82b63"} Jan 22 16:19:03 crc kubenswrapper[5032]: I0122 16:19:03.219430 5032 generic.go:334] "Generic (PLEG): container finished" podID="e4695925-6a14-469e-848d-4f4c05601c65" containerID="dad2d3e96d400191164f669aeee4b67423c495c6bd9deff6d90e70e4c36124e8" exitCode=0 Jan 22 16:19:03 crc kubenswrapper[5032]: I0122 16:19:03.219525 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerDied","Data":"dad2d3e96d400191164f669aeee4b67423c495c6bd9deff6d90e70e4c36124e8"} Jan 22 16:19:04 crc kubenswrapper[5032]: I0122 16:19:04.232419 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"217a3d8e04cfdd33bb0662b2f0c5a632d10b3a59346d62f4e2f473d570f6170c"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253050 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"df5b496d7c869b14ba84aa1f0d6398c999c453173836e9f990d542311eab4e5a"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253600 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253618 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"42e7ea4507a7e7e06d2b9dbe2aede3122816799c6aacc055031aae4ba9adb9ec"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253633 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"399469c08c2123f474469c156cccd1ab26c2944eaa98cef938340dc8179699a3"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253649 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"2071d51fe2e104d85fa916bff897c03beae2701dc2e7093bf7aafcbffd0e16fd"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.253662 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bkbn9" event={"ID":"e4695925-6a14-469e-848d-4f4c05601c65","Type":"ContainerStarted","Data":"e8430ff215229752c59544b28f55fa4f17ef551e312082a48764d5c1f02fa5fe"} Jan 22 16:19:06 crc kubenswrapper[5032]: I0122 16:19:06.277568 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-bkbn9" podStartSLOduration=7.730210761 podStartE2EDuration="18.277548242s" podCreationTimestamp="2026-01-22 16:18:48 +0000 UTC" firstStartedPulling="2026-01-22 16:18:50.356625062 +0000 UTC m=+834.168794103" 
lastFinishedPulling="2026-01-22 16:19:00.903962553 +0000 UTC m=+844.716131584" observedRunningTime="2026-01-22 16:19:06.274017046 +0000 UTC m=+850.086186087" watchObservedRunningTime="2026-01-22 16:19:06.277548242 +0000 UTC m=+850.089717263" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.603706 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-pskfs" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.781902 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pdtz7"] Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.783059 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.786137 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.786241 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-kdxx5" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.787156 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.795993 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pdtz7"] Jan 22 16:19:08 crc kubenswrapper[5032]: I0122 16:19:08.997921 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjtts\" (UniqueName: \"kubernetes.io/projected/19b316b1-1229-4a95-9d08-4a426582d827-kube-api-access-tjtts\") pod \"openstack-operator-index-pdtz7\" (UID: \"19b316b1-1229-4a95-9d08-4a426582d827\") " pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.085100 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.099761 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjtts\" (UniqueName: \"kubernetes.io/projected/19b316b1-1229-4a95-9d08-4a426582d827-kube-api-access-tjtts\") pod \"openstack-operator-index-pdtz7\" (UID: \"19b316b1-1229-4a95-9d08-4a426582d827\") " pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.123656 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.129378 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjtts\" (UniqueName: \"kubernetes.io/projected/19b316b1-1229-4a95-9d08-4a426582d827-kube-api-access-tjtts\") pod \"openstack-operator-index-pdtz7\" (UID: \"19b316b1-1229-4a95-9d08-4a426582d827\") " pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.213550 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:09 crc kubenswrapper[5032]: I0122 16:19:09.401850 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pdtz7"] Jan 22 16:19:10 crc kubenswrapper[5032]: I0122 16:19:10.282348 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pdtz7" event={"ID":"19b316b1-1229-4a95-9d08-4a426582d827","Type":"ContainerStarted","Data":"4d893c1efd87d7a225f87016765f2df2ac309662bfe32edf2a3a1f06b5307590"} Jan 22 16:19:19 crc kubenswrapper[5032]: I0122 16:19:19.074652 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9ms7l" Jan 22 16:19:19 crc kubenswrapper[5032]: I0122 16:19:19.087341 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-bkbn9" Jan 22 16:19:23 crc kubenswrapper[5032]: I0122 16:19:23.379039 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pdtz7" event={"ID":"19b316b1-1229-4a95-9d08-4a426582d827","Type":"ContainerStarted","Data":"fb872efffd9de19f0ff66afcb5de256dfad41beebb332d63d8b61d4a30daa805"} Jan 22 16:19:23 crc kubenswrapper[5032]: I0122 16:19:23.397933 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-pdtz7" podStartSLOduration=2.375203037 podStartE2EDuration="15.397899804s" podCreationTimestamp="2026-01-22 16:19:08 +0000 UTC" firstStartedPulling="2026-01-22 16:19:09.422813044 +0000 UTC m=+853.234982075" lastFinishedPulling="2026-01-22 16:19:22.445509801 +0000 UTC m=+866.257678842" observedRunningTime="2026-01-22 16:19:23.395221011 +0000 UTC m=+867.207390082" watchObservedRunningTime="2026-01-22 16:19:23.397899804 +0000 UTC m=+867.210068875" Jan 22 16:19:29 crc kubenswrapper[5032]: I0122 16:19:29.213685 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:29 crc kubenswrapper[5032]: I0122 16:19:29.215174 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:29 crc kubenswrapper[5032]: I0122 16:19:29.252663 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:30 crc kubenswrapper[5032]: I0122 16:19:30.320126 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-pdtz7" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.440071 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb"] Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.442583 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.446244 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-7xbkx" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.453584 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb"] Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.519621 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czfpl\" (UniqueName: \"kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.519700 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.519746 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.620798 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czfpl\" (UniqueName: \"kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.620872 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.620907 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.621359 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.621628 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.643091 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czfpl\" (UniqueName: \"kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl\") pod \"b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:38 crc kubenswrapper[5032]: I0122 16:19:38.765049 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:39 crc kubenswrapper[5032]: I0122 16:19:39.013094 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb"] Jan 22 16:19:39 crc kubenswrapper[5032]: W0122 16:19:39.020323 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75283565_e2e5_46a8_a168_553c6c9cadc5.slice/crio-25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2 WatchSource:0}: Error finding container 25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2: Status 404 returned error can't find the container with id 25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2 Jan 22 16:19:39 crc kubenswrapper[5032]: I0122 16:19:39.377447 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerStarted","Data":"3c602e1ffcfaafe44eaf9e76e384e801bc820032ce0417a80670eee8bb10a2d9"} Jan 22 16:19:39 crc kubenswrapper[5032]: I0122 16:19:39.377513 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerStarted","Data":"25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2"} Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.388551 5032 generic.go:334] "Generic (PLEG): container finished" podID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerID="3c602e1ffcfaafe44eaf9e76e384e801bc820032ce0417a80670eee8bb10a2d9" exitCode=0 Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.388676 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerDied","Data":"3c602e1ffcfaafe44eaf9e76e384e801bc820032ce0417a80670eee8bb10a2d9"} Jan 22 16:19:40 crc kubenswrapper[5032]: 
I0122 16:19:40.589505 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.594840 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.606732 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.655393 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.655447 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.655481 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfc6c\" (UniqueName: \"kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.757562 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.757622 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.757669 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfc6c\" (UniqueName: \"kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.758725 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.758817 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content\") pod 
\"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.781927 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfc6c\" (UniqueName: \"kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c\") pod \"redhat-marketplace-qgmfr\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:40 crc kubenswrapper[5032]: I0122 16:19:40.935606 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:41 crc kubenswrapper[5032]: I0122 16:19:41.130707 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:41 crc kubenswrapper[5032]: I0122 16:19:41.395808 5032 generic.go:334] "Generic (PLEG): container finished" podID="3ed0ad2a-3171-438a-8749-b115a413d852" containerID="275d3528c7cc8c1f4ff378dbc985d0df8ba6d8438e9a811ea4379a869365548f" exitCode=0 Jan 22 16:19:41 crc kubenswrapper[5032]: I0122 16:19:41.395879 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerDied","Data":"275d3528c7cc8c1f4ff378dbc985d0df8ba6d8438e9a811ea4379a869365548f"} Jan 22 16:19:41 crc kubenswrapper[5032]: I0122 16:19:41.395912 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerStarted","Data":"89881519308c05ed3518aa62d6048fbbf9a9c7c1ab770a4ec0fed29c3413c405"} Jan 22 16:19:42 crc kubenswrapper[5032]: I0122 16:19:42.404058 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerStarted","Data":"a11b657e40f7dafc0717e1c68ecb149b70a9521a756eb692ea0d6885aef95a9d"} Jan 22 16:19:42 crc kubenswrapper[5032]: I0122 16:19:42.406253 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerStarted","Data":"5a6395cc3afeaf91d8f80bb8535115d8c6c927bcc28ae21498a37acb8c463ed6"} Jan 22 16:19:43 crc kubenswrapper[5032]: I0122 16:19:43.416564 5032 generic.go:334] "Generic (PLEG): container finished" podID="3ed0ad2a-3171-438a-8749-b115a413d852" containerID="5a6395cc3afeaf91d8f80bb8535115d8c6c927bcc28ae21498a37acb8c463ed6" exitCode=0 Jan 22 16:19:43 crc kubenswrapper[5032]: I0122 16:19:43.416944 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerDied","Data":"5a6395cc3afeaf91d8f80bb8535115d8c6c927bcc28ae21498a37acb8c463ed6"} Jan 22 16:19:43 crc kubenswrapper[5032]: I0122 16:19:43.425817 5032 generic.go:334] "Generic (PLEG): container finished" podID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerID="a11b657e40f7dafc0717e1c68ecb149b70a9521a756eb692ea0d6885aef95a9d" exitCode=0 Jan 22 16:19:43 crc kubenswrapper[5032]: I0122 16:19:43.425904 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" 
event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerDied","Data":"a11b657e40f7dafc0717e1c68ecb149b70a9521a756eb692ea0d6885aef95a9d"} Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.392349 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.394464 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.405103 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.413730 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.413984 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6b27\" (UniqueName: \"kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.414314 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.441137 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerStarted","Data":"fa5e69eb2fa1989e9babe92d54361aadc388e7e77451450f9ad0e6c176532a91"} Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.471795 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" podStartSLOduration=4.777268857 podStartE2EDuration="6.471770177s" podCreationTimestamp="2026-01-22 16:19:38 +0000 UTC" firstStartedPulling="2026-01-22 16:19:40.391178486 +0000 UTC m=+884.203347547" lastFinishedPulling="2026-01-22 16:19:42.085679796 +0000 UTC m=+885.897848867" observedRunningTime="2026-01-22 16:19:44.466540045 +0000 UTC m=+888.278709066" watchObservedRunningTime="2026-01-22 16:19:44.471770177 +0000 UTC m=+888.283939218" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.515722 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.516025 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6b27\" (UniqueName: 
\"kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.516263 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.517659 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.518341 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.538075 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6b27\" (UniqueName: \"kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27\") pod \"community-operators-bbhfh\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.716146 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:19:44 crc kubenswrapper[5032]: I0122 16:19:44.995048 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:19:45 crc kubenswrapper[5032]: W0122 16:19:45.001354 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94f5498e_3526_42ee_98e2_7f3ecb014890.slice/crio-3bf05d6da52058515233256799b06c3188f0a64f52a219c54ace0dff70ba224a WatchSource:0}: Error finding container 3bf05d6da52058515233256799b06c3188f0a64f52a219c54ace0dff70ba224a: Status 404 returned error can't find the container with id 3bf05d6da52058515233256799b06c3188f0a64f52a219c54ace0dff70ba224a Jan 22 16:19:45 crc kubenswrapper[5032]: I0122 16:19:45.451575 5032 generic.go:334] "Generic (PLEG): container finished" podID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerID="fa5e69eb2fa1989e9babe92d54361aadc388e7e77451450f9ad0e6c176532a91" exitCode=0 Jan 22 16:19:45 crc kubenswrapper[5032]: I0122 16:19:45.451734 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerDied","Data":"fa5e69eb2fa1989e9babe92d54361aadc388e7e77451450f9ad0e6c176532a91"} Jan 22 16:19:45 crc kubenswrapper[5032]: I0122 16:19:45.453784 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerStarted","Data":"3bf05d6da52058515233256799b06c3188f0a64f52a219c54ace0dff70ba224a"} Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.464137 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerStarted","Data":"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9"} Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.751528 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.855698 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czfpl\" (UniqueName: \"kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl\") pod \"75283565-e2e5-46a8-a168-553c6c9cadc5\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.856966 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util\") pod \"75283565-e2e5-46a8-a168-553c6c9cadc5\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.857081 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle\") pod \"75283565-e2e5-46a8-a168-553c6c9cadc5\" (UID: \"75283565-e2e5-46a8-a168-553c6c9cadc5\") " Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.857899 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle" (OuterVolumeSpecName: "bundle") pod "75283565-e2e5-46a8-a168-553c6c9cadc5" (UID: "75283565-e2e5-46a8-a168-553c6c9cadc5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.861494 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl" (OuterVolumeSpecName: "kube-api-access-czfpl") pod "75283565-e2e5-46a8-a168-553c6c9cadc5" (UID: "75283565-e2e5-46a8-a168-553c6c9cadc5"). InnerVolumeSpecName "kube-api-access-czfpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.867867 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util" (OuterVolumeSpecName: "util") pod "75283565-e2e5-46a8-a168-553c6c9cadc5" (UID: "75283565-e2e5-46a8-a168-553c6c9cadc5"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.960644 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czfpl\" (UniqueName: \"kubernetes.io/projected/75283565-e2e5-46a8-a168-553c6c9cadc5-kube-api-access-czfpl\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.960691 5032 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-util\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:46 crc kubenswrapper[5032]: I0122 16:19:46.960703 5032 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/75283565-e2e5-46a8-a168-553c6c9cadc5-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.475348 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.475359 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb" event={"ID":"75283565-e2e5-46a8-a168-553c6c9cadc5","Type":"ContainerDied","Data":"25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2"} Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.475421 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25dd0e371d40e650eb33e160c17cc33ce65e55d7a75d8454ae2a5649608476b2" Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.478631 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerStarted","Data":"1dc8b83eb4a2b517dda83568e30248723ac0ad497413195b8e44ceda55425fed"} Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.481450 5032 generic.go:334] "Generic (PLEG): container finished" podID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerID="b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9" exitCode=0 Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.481497 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerDied","Data":"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9"} Jan 22 16:19:47 crc kubenswrapper[5032]: I0122 16:19:47.512395 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qgmfr" podStartSLOduration=2.385054957 podStartE2EDuration="7.51237069s" podCreationTimestamp="2026-01-22 16:19:40 +0000 UTC" firstStartedPulling="2026-01-22 16:19:41.397397705 +0000 UTC m=+885.209566736" lastFinishedPulling="2026-01-22 16:19:46.524713438 +0000 UTC m=+890.336882469" observedRunningTime="2026-01-22 16:19:47.506315415 +0000 UTC m=+891.318484446" watchObservedRunningTime="2026-01-22 16:19:47.51237069 +0000 UTC m=+891.324539721" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.503962 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc"] Jan 22 16:19:49 crc kubenswrapper[5032]: E0122 16:19:49.504618 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="extract" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.504635 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="extract" Jan 22 16:19:49 crc kubenswrapper[5032]: E0122 16:19:49.504651 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="util" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.504658 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="util" Jan 22 16:19:49 crc kubenswrapper[5032]: E0122 16:19:49.504676 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="pull" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.504683 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="pull" Jan 22 
16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.504848 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="75283565-e2e5-46a8-a168-553c6c9cadc5" containerName="extract" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.505404 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.523517 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-hzccq" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.595204 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfvkl\" (UniqueName: \"kubernetes.io/projected/81045473-a36f-4725-8a9c-d7de6defd405-kube-api-access-sfvkl\") pod \"openstack-operator-controller-init-79b4b78677-pjcqc\" (UID: \"81045473-a36f-4725-8a9c-d7de6defd405\") " pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.635752 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc"] Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.696607 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfvkl\" (UniqueName: \"kubernetes.io/projected/81045473-a36f-4725-8a9c-d7de6defd405-kube-api-access-sfvkl\") pod \"openstack-operator-controller-init-79b4b78677-pjcqc\" (UID: \"81045473-a36f-4725-8a9c-d7de6defd405\") " pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.715258 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfvkl\" (UniqueName: \"kubernetes.io/projected/81045473-a36f-4725-8a9c-d7de6defd405-kube-api-access-sfvkl\") pod \"openstack-operator-controller-init-79b4b78677-pjcqc\" (UID: \"81045473-a36f-4725-8a9c-d7de6defd405\") " pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:19:49 crc kubenswrapper[5032]: I0122 16:19:49.820181 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.433264 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc"] Jan 22 16:19:50 crc kubenswrapper[5032]: W0122 16:19:50.439111 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81045473_a36f_4725_8a9c_d7de6defd405.slice/crio-d1a13c269ca63d239ea41269846afbb1fa9b58b44dc300688f3db5ab0bf2a0ec WatchSource:0}: Error finding container d1a13c269ca63d239ea41269846afbb1fa9b58b44dc300688f3db5ab0bf2a0ec: Status 404 returned error can't find the container with id d1a13c269ca63d239ea41269846afbb1fa9b58b44dc300688f3db5ab0bf2a0ec Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.501723 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerStarted","Data":"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400"} Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.503521 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" event={"ID":"81045473-a36f-4725-8a9c-d7de6defd405","Type":"ContainerStarted","Data":"d1a13c269ca63d239ea41269846afbb1fa9b58b44dc300688f3db5ab0bf2a0ec"} Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.936668 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.936717 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:50 crc kubenswrapper[5032]: I0122 16:19:50.990657 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:51 crc kubenswrapper[5032]: I0122 16:19:51.513000 5032 generic.go:334] "Generic (PLEG): container finished" podID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerID="a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400" exitCode=0 Jan 22 16:19:51 crc kubenswrapper[5032]: I0122 16:19:51.513104 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerDied","Data":"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400"} Jan 22 16:19:51 crc kubenswrapper[5032]: I0122 16:19:51.587492 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:52 crc kubenswrapper[5032]: I0122 16:19:52.766877 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:53 crc kubenswrapper[5032]: I0122 16:19:53.529996 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qgmfr" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="registry-server" containerID="cri-o://1dc8b83eb4a2b517dda83568e30248723ac0ad497413195b8e44ceda55425fed" gracePeriod=2 Jan 22 16:19:55 crc kubenswrapper[5032]: I0122 16:19:55.548635 5032 generic.go:334] "Generic (PLEG): container finished" podID="3ed0ad2a-3171-438a-8749-b115a413d852" 
containerID="1dc8b83eb4a2b517dda83568e30248723ac0ad497413195b8e44ceda55425fed" exitCode=0 Jan 22 16:19:55 crc kubenswrapper[5032]: I0122 16:19:55.548724 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerDied","Data":"1dc8b83eb4a2b517dda83568e30248723ac0ad497413195b8e44ceda55425fed"} Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.280844 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.390080 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content\") pod \"3ed0ad2a-3171-438a-8749-b115a413d852\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.390269 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfc6c\" (UniqueName: \"kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c\") pod \"3ed0ad2a-3171-438a-8749-b115a413d852\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.390348 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities\") pod \"3ed0ad2a-3171-438a-8749-b115a413d852\" (UID: \"3ed0ad2a-3171-438a-8749-b115a413d852\") " Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.391637 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities" (OuterVolumeSpecName: "utilities") pod "3ed0ad2a-3171-438a-8749-b115a413d852" (UID: "3ed0ad2a-3171-438a-8749-b115a413d852"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.404206 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c" (OuterVolumeSpecName: "kube-api-access-kfc6c") pod "3ed0ad2a-3171-438a-8749-b115a413d852" (UID: "3ed0ad2a-3171-438a-8749-b115a413d852"). InnerVolumeSpecName "kube-api-access-kfc6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.417539 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ed0ad2a-3171-438a-8749-b115a413d852" (UID: "3ed0ad2a-3171-438a-8749-b115a413d852"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.492350 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.492786 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfc6c\" (UniqueName: \"kubernetes.io/projected/3ed0ad2a-3171-438a-8749-b115a413d852-kube-api-access-kfc6c\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.492803 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ed0ad2a-3171-438a-8749-b115a413d852-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.558199 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgmfr" event={"ID":"3ed0ad2a-3171-438a-8749-b115a413d852","Type":"ContainerDied","Data":"89881519308c05ed3518aa62d6048fbbf9a9c7c1ab770a4ec0fed29c3413c405"} Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.558257 5032 scope.go:117] "RemoveContainer" containerID="1dc8b83eb4a2b517dda83568e30248723ac0ad497413195b8e44ceda55425fed" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.558328 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgmfr" Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.584684 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:56 crc kubenswrapper[5032]: I0122 16:19:56.589153 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgmfr"] Jan 22 16:19:58 crc kubenswrapper[5032]: I0122 16:19:58.462052 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" path="/var/lib/kubelet/pods/3ed0ad2a-3171-438a-8749-b115a413d852/volumes" Jan 22 16:19:58 crc kubenswrapper[5032]: I0122 16:19:58.681266 5032 scope.go:117] "RemoveContainer" containerID="5a6395cc3afeaf91d8f80bb8535115d8c6c927bcc28ae21498a37acb8c463ed6" Jan 22 16:19:59 crc kubenswrapper[5032]: I0122 16:19:59.259006 5032 scope.go:117] "RemoveContainer" containerID="275d3528c7cc8c1f4ff378dbc985d0df8ba6d8438e9a811ea4379a869365548f" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.987837 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:00 crc kubenswrapper[5032]: E0122 16:20:00.988659 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="extract-content" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.988680 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="extract-content" Jan 22 16:20:00 crc kubenswrapper[5032]: E0122 16:20:00.988704 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="extract-utilities" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.988713 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="extract-utilities" Jan 22 16:20:00 crc kubenswrapper[5032]: E0122 16:20:00.988738 5032 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="registry-server" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.988747 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="registry-server" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.989047 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ed0ad2a-3171-438a-8749-b115a413d852" containerName="registry-server" Jan 22 16:20:00 crc kubenswrapper[5032]: I0122 16:20:00.990756 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.009435 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.173261 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.173310 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z46g8\" (UniqueName: \"kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.173368 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.275031 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.275075 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z46g8\" (UniqueName: \"kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.275135 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.275722 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.275744 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.300032 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z46g8\" (UniqueName: \"kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8\") pod \"certified-operators-mssbp\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.313845 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.606732 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" event={"ID":"81045473-a36f-4725-8a9c-d7de6defd405","Type":"ContainerStarted","Data":"dd111312501b14eeea29e6355f51d06753776f07f396b7cab0ac7e3cd5be97b8"} Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.607148 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.610621 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerStarted","Data":"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84"} Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.635271 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" podStartSLOduration=2.307347286 podStartE2EDuration="12.635251907s" podCreationTimestamp="2026-01-22 16:19:49 +0000 UTC" firstStartedPulling="2026-01-22 16:19:50.441929641 +0000 UTC m=+894.254098672" lastFinishedPulling="2026-01-22 16:20:00.769834272 +0000 UTC m=+904.582003293" observedRunningTime="2026-01-22 16:20:01.632470972 +0000 UTC m=+905.444640003" watchObservedRunningTime="2026-01-22 16:20:01.635251907 +0000 UTC m=+905.447420938" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.656744 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bbhfh" podStartSLOduration=6.467469826 podStartE2EDuration="17.656724882s" podCreationTimestamp="2026-01-22 16:19:44 +0000 UTC" firstStartedPulling="2026-01-22 16:19:47.483423552 +0000 UTC m=+891.295592623" lastFinishedPulling="2026-01-22 16:19:58.672678648 +0000 UTC m=+902.484847679" observedRunningTime="2026-01-22 16:20:01.651958722 +0000 UTC m=+905.464127743" watchObservedRunningTime="2026-01-22 16:20:01.656724882 +0000 UTC m=+905.468893913" Jan 22 16:20:01 crc kubenswrapper[5032]: I0122 16:20:01.787832 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:01 crc 
kubenswrapper[5032]: W0122 16:20:01.802229 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda237d0fc_8ce0_4a82_a94e_4e537e2013ef.slice/crio-c80972fd180e046ce5b6b232f2b2ece248aa13f471effe810d7068e71b2d58d3 WatchSource:0}: Error finding container c80972fd180e046ce5b6b232f2b2ece248aa13f471effe810d7068e71b2d58d3: Status 404 returned error can't find the container with id c80972fd180e046ce5b6b232f2b2ece248aa13f471effe810d7068e71b2d58d3 Jan 22 16:20:02 crc kubenswrapper[5032]: I0122 16:20:02.616358 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerStarted","Data":"c80972fd180e046ce5b6b232f2b2ece248aa13f471effe810d7068e71b2d58d3"} Jan 22 16:20:04 crc kubenswrapper[5032]: I0122 16:20:04.634686 5032 generic.go:334] "Generic (PLEG): container finished" podID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerID="ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122" exitCode=0 Jan 22 16:20:04 crc kubenswrapper[5032]: I0122 16:20:04.634763 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerDied","Data":"ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122"} Jan 22 16:20:04 crc kubenswrapper[5032]: I0122 16:20:04.717683 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:04 crc kubenswrapper[5032]: I0122 16:20:04.717741 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:04 crc kubenswrapper[5032]: I0122 16:20:04.777456 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:05 crc kubenswrapper[5032]: I0122 16:20:05.681250 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:06 crc kubenswrapper[5032]: I0122 16:20:06.650284 5032 generic.go:334] "Generic (PLEG): container finished" podID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerID="aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3" exitCode=0 Jan 22 16:20:06 crc kubenswrapper[5032]: I0122 16:20:06.650391 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerDied","Data":"aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3"} Jan 22 16:20:07 crc kubenswrapper[5032]: I0122 16:20:07.172417 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:20:07 crc kubenswrapper[5032]: I0122 16:20:07.658157 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bbhfh" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="registry-server" containerID="cri-o://4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84" gracePeriod=2 Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.579835 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.664954 5032 generic.go:334] "Generic (PLEG): container finished" podID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerID="4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84" exitCode=0 Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.665019 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerDied","Data":"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84"} Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.665048 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbhfh" event={"ID":"94f5498e-3526-42ee-98e2-7f3ecb014890","Type":"ContainerDied","Data":"3bf05d6da52058515233256799b06c3188f0a64f52a219c54ace0dff70ba224a"} Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.665065 5032 scope.go:117] "RemoveContainer" containerID="4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.665176 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbhfh" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.668035 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerStarted","Data":"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8"} Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.679156 5032 scope.go:117] "RemoveContainer" containerID="a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.684825 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mssbp" podStartSLOduration=5.589843397 podStartE2EDuration="8.684806279s" podCreationTimestamp="2026-01-22 16:20:00 +0000 UTC" firstStartedPulling="2026-01-22 16:20:04.636000855 +0000 UTC m=+908.448169936" lastFinishedPulling="2026-01-22 16:20:07.730963787 +0000 UTC m=+911.543132818" observedRunningTime="2026-01-22 16:20:08.68371981 +0000 UTC m=+912.495888841" watchObservedRunningTime="2026-01-22 16:20:08.684806279 +0000 UTC m=+912.496975300" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.699697 5032 scope.go:117] "RemoveContainer" containerID="b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.716691 5032 scope.go:117] "RemoveContainer" containerID="4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84" Jan 22 16:20:08 crc kubenswrapper[5032]: E0122 16:20:08.717107 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84\": container with ID starting with 4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84 not found: ID does not exist" containerID="4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.717148 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84"} err="failed 
to get container status \"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84\": rpc error: code = NotFound desc = could not find container \"4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84\": container with ID starting with 4894437c10fc1cd595859f95fc347b0be0637f5b8b40952e4d864b18972e4a84 not found: ID does not exist" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.717174 5032 scope.go:117] "RemoveContainer" containerID="a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400" Jan 22 16:20:08 crc kubenswrapper[5032]: E0122 16:20:08.717567 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400\": container with ID starting with a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400 not found: ID does not exist" containerID="a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.717621 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400"} err="failed to get container status \"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400\": rpc error: code = NotFound desc = could not find container \"a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400\": container with ID starting with a1091d55e5dec61c85d690b5430d87243de5f18849465ef55dddce3ed24e5400 not found: ID does not exist" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.717651 5032 scope.go:117] "RemoveContainer" containerID="b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9" Jan 22 16:20:08 crc kubenswrapper[5032]: E0122 16:20:08.718103 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9\": container with ID starting with b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9 not found: ID does not exist" containerID="b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.718125 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9"} err="failed to get container status \"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9\": rpc error: code = NotFound desc = could not find container \"b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9\": container with ID starting with b4f3cd9b7df136050501e4a3fa876cde241ad8fc28968327213fcf7b761565c9 not found: ID does not exist" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.781501 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities\") pod \"94f5498e-3526-42ee-98e2-7f3ecb014890\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.781573 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6b27\" (UniqueName: \"kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27\") pod \"94f5498e-3526-42ee-98e2-7f3ecb014890\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " Jan 22 16:20:08 
crc kubenswrapper[5032]: I0122 16:20:08.781764 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content\") pod \"94f5498e-3526-42ee-98e2-7f3ecb014890\" (UID: \"94f5498e-3526-42ee-98e2-7f3ecb014890\") " Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.782654 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities" (OuterVolumeSpecName: "utilities") pod "94f5498e-3526-42ee-98e2-7f3ecb014890" (UID: "94f5498e-3526-42ee-98e2-7f3ecb014890"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.787327 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27" (OuterVolumeSpecName: "kube-api-access-n6b27") pod "94f5498e-3526-42ee-98e2-7f3ecb014890" (UID: "94f5498e-3526-42ee-98e2-7f3ecb014890"). InnerVolumeSpecName "kube-api-access-n6b27". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.789436 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.789472 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6b27\" (UniqueName: \"kubernetes.io/projected/94f5498e-3526-42ee-98e2-7f3ecb014890-kube-api-access-n6b27\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.835304 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94f5498e-3526-42ee-98e2-7f3ecb014890" (UID: "94f5498e-3526-42ee-98e2-7f3ecb014890"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.891275 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f5498e-3526-42ee-98e2-7f3ecb014890-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:08 crc kubenswrapper[5032]: I0122 16:20:08.995299 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:20:09 crc kubenswrapper[5032]: I0122 16:20:09.003073 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bbhfh"] Jan 22 16:20:09 crc kubenswrapper[5032]: I0122 16:20:09.824626 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-79b4b78677-pjcqc" Jan 22 16:20:10 crc kubenswrapper[5032]: I0122 16:20:10.477098 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" path="/var/lib/kubelet/pods/94f5498e-3526-42ee-98e2-7f3ecb014890/volumes" Jan 22 16:20:11 crc kubenswrapper[5032]: I0122 16:20:11.315213 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:11 crc kubenswrapper[5032]: I0122 16:20:11.315259 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:11 crc kubenswrapper[5032]: I0122 16:20:11.351269 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:21 crc kubenswrapper[5032]: I0122 16:20:21.355118 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:21 crc kubenswrapper[5032]: I0122 16:20:21.409186 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:21 crc kubenswrapper[5032]: I0122 16:20:21.748282 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mssbp" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="registry-server" containerID="cri-o://0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8" gracePeriod=2 Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.613581 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.755136 5032 generic.go:334] "Generic (PLEG): container finished" podID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerID="0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8" exitCode=0 Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.755174 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mssbp" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.755190 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerDied","Data":"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8"} Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.755972 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mssbp" event={"ID":"a237d0fc-8ce0-4a82-a94e-4e537e2013ef","Type":"ContainerDied","Data":"c80972fd180e046ce5b6b232f2b2ece248aa13f471effe810d7068e71b2d58d3"} Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.755995 5032 scope.go:117] "RemoveContainer" containerID="0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.770720 5032 scope.go:117] "RemoveContainer" containerID="aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.784427 5032 scope.go:117] "RemoveContainer" containerID="ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.799111 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities\") pod \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.799222 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content\") pod \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.799335 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z46g8\" (UniqueName: \"kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8\") pod \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\" (UID: \"a237d0fc-8ce0-4a82-a94e-4e537e2013ef\") " Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.800562 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities" (OuterVolumeSpecName: "utilities") pod "a237d0fc-8ce0-4a82-a94e-4e537e2013ef" (UID: "a237d0fc-8ce0-4a82-a94e-4e537e2013ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.805519 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8" (OuterVolumeSpecName: "kube-api-access-z46g8") pod "a237d0fc-8ce0-4a82-a94e-4e537e2013ef" (UID: "a237d0fc-8ce0-4a82-a94e-4e537e2013ef"). InnerVolumeSpecName "kube-api-access-z46g8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.809483 5032 scope.go:117] "RemoveContainer" containerID="0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8" Jan 22 16:20:22 crc kubenswrapper[5032]: E0122 16:20:22.809932 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8\": container with ID starting with 0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8 not found: ID does not exist" containerID="0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.809993 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8"} err="failed to get container status \"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8\": rpc error: code = NotFound desc = could not find container \"0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8\": container with ID starting with 0c699182a127b21d920e1b6b0d14175d27baa5f751d9590290341933c02bbdb8 not found: ID does not exist" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.810026 5032 scope.go:117] "RemoveContainer" containerID="aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3" Jan 22 16:20:22 crc kubenswrapper[5032]: E0122 16:20:22.810502 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3\": container with ID starting with aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3 not found: ID does not exist" containerID="aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.810544 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3"} err="failed to get container status \"aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3\": rpc error: code = NotFound desc = could not find container \"aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3\": container with ID starting with aedf678cb7682ce7dd0aba66f35ad4a1be75e9fc6fb67829ba9935e46fc159c3 not found: ID does not exist" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.810572 5032 scope.go:117] "RemoveContainer" containerID="ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122" Jan 22 16:20:22 crc kubenswrapper[5032]: E0122 16:20:22.810874 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122\": container with ID starting with ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122 not found: ID does not exist" containerID="ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.810900 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122"} err="failed to get container status \"ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122\": rpc error: code = NotFound desc = could not 
find container \"ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122\": container with ID starting with ac48d2bbc57510d67c295ae2876ef622f803e2f328ef8b0f40ba03b0636bf122 not found: ID does not exist" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.847242 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a237d0fc-8ce0-4a82-a94e-4e537e2013ef" (UID: "a237d0fc-8ce0-4a82-a94e-4e537e2013ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.900667 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.900743 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z46g8\" (UniqueName: \"kubernetes.io/projected/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-kube-api-access-z46g8\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:22 crc kubenswrapper[5032]: I0122 16:20:22.900758 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a237d0fc-8ce0-4a82-a94e-4e537e2013ef-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:20:23 crc kubenswrapper[5032]: I0122 16:20:23.086919 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:23 crc kubenswrapper[5032]: I0122 16:20:23.096124 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mssbp"] Jan 22 16:20:24 crc kubenswrapper[5032]: I0122 16:20:24.466652 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" path="/var/lib/kubelet/pods/a237d0fc-8ce0-4a82-a94e-4e537e2013ef/volumes" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748071 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72"] Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748774 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748789 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748802 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="extract-utilities" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748810 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="extract-utilities" Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748819 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748828 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748844 5032 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="extract-content" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748851 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="extract-content" Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748881 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="extract-utilities" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748889 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="extract-utilities" Jan 22 16:20:30 crc kubenswrapper[5032]: E0122 16:20:30.748900 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="extract-content" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.748907 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="extract-content" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.749051 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="94f5498e-3526-42ee-98e2-7f3ecb014890" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.749068 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a237d0fc-8ce0-4a82-a94e-4e537e2013ef" containerName="registry-server" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.749507 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.753674 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-2sc5z" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.764383 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.773631 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.774606 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.776426 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-hmffh" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.778611 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.779662 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.781944 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-p4npz" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.788185 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.814001 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8nx4\" (UniqueName: \"kubernetes.io/projected/7f26e503-3b5f-4a4d-9b23-c8b742fef2a9-kube-api-access-t8nx4\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wzh72\" (UID: \"7f26e503-3b5f-4a4d-9b23-c8b742fef2a9\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.814096 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95pkt\" (UniqueName: \"kubernetes.io/projected/57b66df0-2faf-4f1a-980c-55094e9abda4-kube-api-access-95pkt\") pod \"cinder-operator-controller-manager-69cf5d4557-k9425\" (UID: \"57b66df0-2faf-4f1a-980c-55094e9abda4\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.814148 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjnjb\" (UniqueName: \"kubernetes.io/projected/e558895e-9fa5-46dc-a852-2b876aa9d8f3-kube-api-access-qjnjb\") pod \"designate-operator-controller-manager-b45d7bf98-4gkbq\" (UID: \"e558895e-9fa5-46dc-a852-2b876aa9d8f3\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.814236 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.824939 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.826059 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.836535 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-827kl" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.849986 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.850753 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.855008 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mzvz2" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.868993 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.898048 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.907640 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.908425 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.910387 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-9glwt" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.915881 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8nx4\" (UniqueName: \"kubernetes.io/projected/7f26e503-3b5f-4a4d-9b23-c8b742fef2a9-kube-api-access-t8nx4\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wzh72\" (UID: \"7f26e503-3b5f-4a4d-9b23-c8b742fef2a9\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.917670 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95pkt\" (UniqueName: \"kubernetes.io/projected/57b66df0-2faf-4f1a-980c-55094e9abda4-kube-api-access-95pkt\") pod \"cinder-operator-controller-manager-69cf5d4557-k9425\" (UID: \"57b66df0-2faf-4f1a-980c-55094e9abda4\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.917772 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjnjb\" (UniqueName: \"kubernetes.io/projected/e558895e-9fa5-46dc-a852-2b876aa9d8f3-kube-api-access-qjnjb\") pod \"designate-operator-controller-manager-b45d7bf98-4gkbq\" (UID: \"e558895e-9fa5-46dc-a852-2b876aa9d8f3\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.921962 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27"] Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.923266 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.925079 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-5q7ms" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.926340 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.944248 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8nx4\" (UniqueName: \"kubernetes.io/projected/7f26e503-3b5f-4a4d-9b23-c8b742fef2a9-kube-api-access-t8nx4\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wzh72\" (UID: \"7f26e503-3b5f-4a4d-9b23-c8b742fef2a9\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.947484 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjnjb\" (UniqueName: \"kubernetes.io/projected/e558895e-9fa5-46dc-a852-2b876aa9d8f3-kube-api-access-qjnjb\") pod \"designate-operator-controller-manager-b45d7bf98-4gkbq\" (UID: \"e558895e-9fa5-46dc-a852-2b876aa9d8f3\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.948627 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95pkt\" (UniqueName: \"kubernetes.io/projected/57b66df0-2faf-4f1a-980c-55094e9abda4-kube-api-access-95pkt\") pod \"cinder-operator-controller-manager-69cf5d4557-k9425\" (UID: \"57b66df0-2faf-4f1a-980c-55094e9abda4\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:20:30 crc kubenswrapper[5032]: I0122 16:20:30.993106 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.008495 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.009520 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.018651 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-htdt2" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.018899 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wxj7\" (UniqueName: \"kubernetes.io/projected/b0a92c7d-3eb6-4d18-a03c-82b380ba29d9-kube-api-access-4wxj7\") pod \"heat-operator-controller-manager-594c8c9d5d-jtrqh\" (UID: \"b0a92c7d-3eb6-4d18-a03c-82b380ba29d9\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.018958 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcvtz\" (UniqueName: \"kubernetes.io/projected/e09867e2-02a7-4b8d-a388-78cb4a00b687-kube-api-access-jcvtz\") pod \"glance-operator-controller-manager-78fdd796fd-9r28s\" (UID: \"e09867e2-02a7-4b8d-a388-78cb4a00b687\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.019047 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwpwl\" (UniqueName: \"kubernetes.io/projected/59dad380-97a8-4d1a-9dd7-823d8be84b09-kube-api-access-rwpwl\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.019075 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.019148 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5t7h\" (UniqueName: \"kubernetes.io/projected/550ebf15-3945-4782-b3b0-c32a34743e65-kube-api-access-n5t7h\") pod \"ironic-operator-controller-manager-69d6c9f5b8-46xgl\" (UID: \"550ebf15-3945-4782-b3b0-c32a34743e65\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.019173 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj9zp\" (UniqueName: \"kubernetes.io/projected/f7d15cd8-d231-493f-ace5-b52e78dff983-kube-api-access-hj9zp\") pod \"horizon-operator-controller-manager-77d5c5b54f-jtvgl\" (UID: \"f7d15cd8-d231-493f-ace5-b52e78dff983\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.031491 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.044980 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr"] Jan 22 16:20:31 crc 
kubenswrapper[5032]: I0122 16:20:31.045773 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.057208 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-xlqrx" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.060902 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.069342 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.084389 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.096994 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.097572 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.098089 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.111225 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.122986 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.123783 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5t7h\" (UniqueName: \"kubernetes.io/projected/550ebf15-3945-4782-b3b0-c32a34743e65-kube-api-access-n5t7h\") pod \"ironic-operator-controller-manager-69d6c9f5b8-46xgl\" (UID: \"550ebf15-3945-4782-b3b0-c32a34743e65\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.123823 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj9zp\" (UniqueName: \"kubernetes.io/projected/f7d15cd8-d231-493f-ace5-b52e78dff983-kube-api-access-hj9zp\") pod \"horizon-operator-controller-manager-77d5c5b54f-jtvgl\" (UID: \"f7d15cd8-d231-493f-ace5-b52e78dff983\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.123903 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wxj7\" (UniqueName: \"kubernetes.io/projected/b0a92c7d-3eb6-4d18-a03c-82b380ba29d9-kube-api-access-4wxj7\") pod \"heat-operator-controller-manager-594c8c9d5d-jtrqh\" (UID: \"b0a92c7d-3eb6-4d18-a03c-82b380ba29d9\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.123925 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-jcvtz\" (UniqueName: \"kubernetes.io/projected/e09867e2-02a7-4b8d-a388-78cb4a00b687-kube-api-access-jcvtz\") pod \"glance-operator-controller-manager-78fdd796fd-9r28s\" (UID: \"e09867e2-02a7-4b8d-a388-78cb4a00b687\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.124777 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwpwl\" (UniqueName: \"kubernetes.io/projected/59dad380-97a8-4d1a-9dd7-823d8be84b09-kube-api-access-rwpwl\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.124816 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.124972 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.125027 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:31.625006505 +0000 UTC m=+935.437175536 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.125072 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-hmlmz" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.150450 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.151575 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.164525 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5t7h\" (UniqueName: \"kubernetes.io/projected/550ebf15-3945-4782-b3b0-c32a34743e65-kube-api-access-n5t7h\") pod \"ironic-operator-controller-manager-69d6c9f5b8-46xgl\" (UID: \"550ebf15-3945-4782-b3b0-c32a34743e65\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.166036 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-mfjhb" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.169184 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.169260 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.170384 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.182779 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wxj7\" (UniqueName: \"kubernetes.io/projected/b0a92c7d-3eb6-4d18-a03c-82b380ba29d9-kube-api-access-4wxj7\") pod \"heat-operator-controller-manager-594c8c9d5d-jtrqh\" (UID: \"b0a92c7d-3eb6-4d18-a03c-82b380ba29d9\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.183202 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.185541 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-2krmr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.186689 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.187441 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj9zp\" (UniqueName: \"kubernetes.io/projected/f7d15cd8-d231-493f-ace5-b52e78dff983-kube-api-access-hj9zp\") pod \"horizon-operator-controller-manager-77d5c5b54f-jtvgl\" (UID: \"f7d15cd8-d231-493f-ace5-b52e78dff983\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.187733 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.195443 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwpwl\" (UniqueName: \"kubernetes.io/projected/59dad380-97a8-4d1a-9dd7-823d8be84b09-kube-api-access-rwpwl\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.197677 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcvtz\" (UniqueName: \"kubernetes.io/projected/e09867e2-02a7-4b8d-a388-78cb4a00b687-kube-api-access-jcvtz\") pod \"glance-operator-controller-manager-78fdd796fd-9r28s\" (UID: \"e09867e2-02a7-4b8d-a388-78cb4a00b687\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.207210 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-kz6jp" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.227147 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.231300 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.232417 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tw66\" (UniqueName: \"kubernetes.io/projected/62453126-30f2-45a5-9b4d-d4844d39107d-kube-api-access-7tw66\") pod \"keystone-operator-controller-manager-b8b6d4659-nqhbr\" (UID: \"62453126-30f2-45a5-9b4d-d4844d39107d\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.232465 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97qgb\" (UniqueName: \"kubernetes.io/projected/d7204606-7e41-46b7-bc8f-ffb0088f839e-kube-api-access-97qgb\") pod \"manila-operator-controller-manager-78c6999f6f-th8gj\" (UID: \"d7204606-7e41-46b7-bc8f-ffb0088f839e\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.238551 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.252984 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.253782 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.253874 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.257001 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-4xncp" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.268986 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.270184 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.281319 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-gmd5j" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.281576 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.287961 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.289256 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.309523 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-2jxss" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.334781 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.335255 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.336906 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nltt\" (UniqueName: \"kubernetes.io/projected/c4713406-4a5e-4bfd-9a06-3c233d2875f4-kube-api-access-9nltt\") pod \"nova-operator-controller-manager-6b8bc8d87d-7xr7k\" (UID: \"c4713406-4a5e-4bfd-9a06-3c233d2875f4\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.336948 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw9q6\" (UniqueName: \"kubernetes.io/projected/f4c46099-26f5-4475-976d-1852dd96b9f8-kube-api-access-qw9q6\") pod \"mariadb-operator-controller-manager-c87fff755-28l4v\" (UID: \"f4c46099-26f5-4475-976d-1852dd96b9f8\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.336985 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337015 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6kck\" (UniqueName: \"kubernetes.io/projected/c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5-kube-api-access-m6kck\") pod \"octavia-operator-controller-manager-7bd9774b6-4dtdf\" (UID: \"c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337049 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57787\" (UniqueName: \"kubernetes.io/projected/f9abaa76-29ed-4f41-9e98-0d6852a98e01-kube-api-access-57787\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337088 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tw66\" (UniqueName: \"kubernetes.io/projected/62453126-30f2-45a5-9b4d-d4844d39107d-kube-api-access-7tw66\") pod \"keystone-operator-controller-manager-b8b6d4659-nqhbr\" (UID: \"62453126-30f2-45a5-9b4d-d4844d39107d\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337115 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz22v\" (UniqueName: \"kubernetes.io/projected/8556c821-4b39-4898-8426-197ab92a52ec-kube-api-access-gz22v\") pod \"neutron-operator-controller-manager-8597d6d6d5-2wsj8\" (UID: \"8556c821-4b39-4898-8426-197ab92a52ec\") 
" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337152 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97qgb\" (UniqueName: \"kubernetes.io/projected/d7204606-7e41-46b7-bc8f-ffb0088f839e-kube-api-access-97qgb\") pod \"manila-operator-controller-manager-78c6999f6f-th8gj\" (UID: \"d7204606-7e41-46b7-bc8f-ffb0088f839e\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.337201 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk256\" (UniqueName: \"kubernetes.io/projected/a4c62d00-dba9-4309-9588-e5cb36dd675f-kube-api-access-rk256\") pod \"ovn-operator-controller-manager-55db956ddc-ljq9b\" (UID: \"a4c62d00-dba9-4309-9588-e5cb36dd675f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.356452 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.363331 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.365695 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.378751 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tw66\" (UniqueName: \"kubernetes.io/projected/62453126-30f2-45a5-9b4d-d4844d39107d-kube-api-access-7tw66\") pod \"keystone-operator-controller-manager-b8b6d4659-nqhbr\" (UID: \"62453126-30f2-45a5-9b4d-d4844d39107d\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.378811 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.384439 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-6z5r9" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.384847 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.414963 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97qgb\" (UniqueName: \"kubernetes.io/projected/d7204606-7e41-46b7-bc8f-ffb0088f839e-kube-api-access-97qgb\") pod \"manila-operator-controller-manager-78c6999f6f-th8gj\" (UID: \"d7204606-7e41-46b7-bc8f-ffb0088f839e\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445585 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw9q6\" (UniqueName: \"kubernetes.io/projected/f4c46099-26f5-4475-976d-1852dd96b9f8-kube-api-access-qw9q6\") pod \"mariadb-operator-controller-manager-c87fff755-28l4v\" (UID: \"f4c46099-26f5-4475-976d-1852dd96b9f8\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445637 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445663 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6kck\" (UniqueName: \"kubernetes.io/projected/c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5-kube-api-access-m6kck\") pod \"octavia-operator-controller-manager-7bd9774b6-4dtdf\" (UID: \"c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445690 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57787\" (UniqueName: \"kubernetes.io/projected/f9abaa76-29ed-4f41-9e98-0d6852a98e01-kube-api-access-57787\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445724 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz22v\" (UniqueName: \"kubernetes.io/projected/8556c821-4b39-4898-8426-197ab92a52ec-kube-api-access-gz22v\") pod \"neutron-operator-controller-manager-8597d6d6d5-2wsj8\" (UID: \"8556c821-4b39-4898-8426-197ab92a52ec\") " pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445769 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk256\" (UniqueName: \"kubernetes.io/projected/a4c62d00-dba9-4309-9588-e5cb36dd675f-kube-api-access-rk256\") pod \"ovn-operator-controller-manager-55db956ddc-ljq9b\" (UID: \"a4c62d00-dba9-4309-9588-e5cb36dd675f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.445815 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nltt\" (UniqueName: 
\"kubernetes.io/projected/c4713406-4a5e-4bfd-9a06-3c233d2875f4-kube-api-access-9nltt\") pod \"nova-operator-controller-manager-6b8bc8d87d-7xr7k\" (UID: \"c4713406-4a5e-4bfd-9a06-3c233d2875f4\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.446011 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.446122 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:31.946062563 +0000 UTC m=+935.758231754 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.464128 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.470385 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.471095 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz22v\" (UniqueName: \"kubernetes.io/projected/8556c821-4b39-4898-8426-197ab92a52ec-kube-api-access-gz22v\") pod \"neutron-operator-controller-manager-8597d6d6d5-2wsj8\" (UID: \"8556c821-4b39-4898-8426-197ab92a52ec\") " pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.474327 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.478022 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6kck\" (UniqueName: \"kubernetes.io/projected/c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5-kube-api-access-m6kck\") pod \"octavia-operator-controller-manager-7bd9774b6-4dtdf\" (UID: \"c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.480710 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-7cgrl" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.494328 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk256\" (UniqueName: \"kubernetes.io/projected/a4c62d00-dba9-4309-9588-e5cb36dd675f-kube-api-access-rk256\") pod \"ovn-operator-controller-manager-55db956ddc-ljq9b\" (UID: \"a4c62d00-dba9-4309-9588-e5cb36dd675f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.496335 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw9q6\" (UniqueName: \"kubernetes.io/projected/f4c46099-26f5-4475-976d-1852dd96b9f8-kube-api-access-qw9q6\") pod \"mariadb-operator-controller-manager-c87fff755-28l4v\" (UID: \"f4c46099-26f5-4475-976d-1852dd96b9f8\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.499382 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nltt\" (UniqueName: \"kubernetes.io/projected/c4713406-4a5e-4bfd-9a06-3c233d2875f4-kube-api-access-9nltt\") pod \"nova-operator-controller-manager-6b8bc8d87d-7xr7k\" (UID: \"c4713406-4a5e-4bfd-9a06-3c233d2875f4\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.499559 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57787\" (UniqueName: \"kubernetes.io/projected/f9abaa76-29ed-4f41-9e98-0d6852a98e01-kube-api-access-57787\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.500691 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.501552 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.503373 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-shhgc" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.525161 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.537615 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.548561 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td7fr\" (UniqueName: \"kubernetes.io/projected/9d75008c-d650-41f9-9c0f-beceefae917f-kube-api-access-td7fr\") pod \"placement-operator-controller-manager-5d646b7d76-5cxj6\" (UID: \"9d75008c-d650-41f9-9c0f-beceefae917f\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.549469 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.575325 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.585752 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.594987 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.619935 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-w27st"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.621635 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.623006 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-vrlmf" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.631985 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.639573 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-w27st"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.649987 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td7fr\" (UniqueName: \"kubernetes.io/projected/9d75008c-d650-41f9-9c0f-beceefae917f-kube-api-access-td7fr\") pod \"placement-operator-controller-manager-5d646b7d76-5cxj6\" (UID: \"9d75008c-d650-41f9-9c0f-beceefae917f\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.650544 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l77dc\" (UniqueName: \"kubernetes.io/projected/ff38f03d-52c0-42dc-85e9-9a67709d0d74-kube-api-access-l77dc\") pod \"telemetry-operator-controller-manager-85cd9769bb-x8dvh\" (UID: \"ff38f03d-52c0-42dc-85e9-9a67709d0d74\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.650703 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqhwd\" (UniqueName: \"kubernetes.io/projected/e7b1c714-1088-419e-9e6b-d553a73583a5-kube-api-access-kqhwd\") pod \"swift-operator-controller-manager-547cbdb99f-cqbtr\" (UID: \"e7b1c714-1088-419e-9e6b-d553a73583a5\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.650789 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.651044 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.651150 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:32.651133857 +0000 UTC m=+936.463302888 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.657830 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.659637 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.662529 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.667420 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-5l6vj" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.671045 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td7fr\" (UniqueName: \"kubernetes.io/projected/9d75008c-d650-41f9-9c0f-beceefae917f-kube-api-access-td7fr\") pod \"placement-operator-controller-manager-5d646b7d76-5cxj6\" (UID: \"9d75008c-d650-41f9-9c0f-beceefae917f\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.685541 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.689631 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.690389 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.691418 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.693666 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.695778 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-v8gnm" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.703779 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.711507 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.723079 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.724169 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.731761 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-p9zgn" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.745823 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5"] Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.752510 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhrxd\" (UniqueName: \"kubernetes.io/projected/739bb968-5cd4-460a-a812-dba945e8a695-kube-api-access-jhrxd\") pod \"watcher-operator-controller-manager-5ffb9c6597-g98vr\" (UID: \"739bb968-5cd4-460a-a812-dba945e8a695\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.752607 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqhwd\" (UniqueName: \"kubernetes.io/projected/e7b1c714-1088-419e-9e6b-d553a73583a5-kube-api-access-kqhwd\") pod \"swift-operator-controller-manager-547cbdb99f-cqbtr\" (UID: \"e7b1c714-1088-419e-9e6b-d553a73583a5\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.752668 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkldb\" (UniqueName: \"kubernetes.io/projected/4f11f845-b9f4-4720-bddd-e5c56ef8a893-kube-api-access-hkldb\") pod \"test-operator-controller-manager-69797bbcbd-w27st\" (UID: \"4f11f845-b9f4-4720-bddd-e5c56ef8a893\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.752742 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l77dc\" (UniqueName: \"kubernetes.io/projected/ff38f03d-52c0-42dc-85e9-9a67709d0d74-kube-api-access-l77dc\") pod \"telemetry-operator-controller-manager-85cd9769bb-x8dvh\" (UID: \"ff38f03d-52c0-42dc-85e9-9a67709d0d74\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.777656 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l77dc\" (UniqueName: \"kubernetes.io/projected/ff38f03d-52c0-42dc-85e9-9a67709d0d74-kube-api-access-l77dc\") pod \"telemetry-operator-controller-manager-85cd9769bb-x8dvh\" (UID: \"ff38f03d-52c0-42dc-85e9-9a67709d0d74\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.791089 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqhwd\" (UniqueName: \"kubernetes.io/projected/e7b1c714-1088-419e-9e6b-d553a73583a5-kube-api-access-kqhwd\") pod \"swift-operator-controller-manager-547cbdb99f-cqbtr\" (UID: \"e7b1c714-1088-419e-9e6b-d553a73583a5\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855213 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkldb\" (UniqueName: 
\"kubernetes.io/projected/4f11f845-b9f4-4720-bddd-e5c56ef8a893-kube-api-access-hkldb\") pod \"test-operator-controller-manager-69797bbcbd-w27st\" (UID: \"4f11f845-b9f4-4720-bddd-e5c56ef8a893\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855284 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855341 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhzlq\" (UniqueName: \"kubernetes.io/projected/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-kube-api-access-zhzlq\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855400 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhrxd\" (UniqueName: \"kubernetes.io/projected/739bb968-5cd4-460a-a812-dba945e8a695-kube-api-access-jhrxd\") pod \"watcher-operator-controller-manager-5ffb9c6597-g98vr\" (UID: \"739bb968-5cd4-460a-a812-dba945e8a695\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855467 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.855521 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktrsr\" (UniqueName: \"kubernetes.io/projected/fc432eb8-8449-4f05-9441-18726488a595-kube-api-access-ktrsr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-46dr5\" (UID: \"fc432eb8-8449-4f05-9441-18726488a595\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.877003 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhrxd\" (UniqueName: \"kubernetes.io/projected/739bb968-5cd4-460a-a812-dba945e8a695-kube-api-access-jhrxd\") pod \"watcher-operator-controller-manager-5ffb9c6597-g98vr\" (UID: \"739bb968-5cd4-460a-a812-dba945e8a695\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.880275 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkldb\" (UniqueName: \"kubernetes.io/projected/4f11f845-b9f4-4720-bddd-e5c56ef8a893-kube-api-access-hkldb\") pod \"test-operator-controller-manager-69797bbcbd-w27st\" (UID: \"4f11f845-b9f4-4720-bddd-e5c56ef8a893\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:20:31 crc 
kubenswrapper[5032]: I0122 16:20:31.957176 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhzlq\" (UniqueName: \"kubernetes.io/projected/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-kube-api-access-zhzlq\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.957259 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.957309 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.957357 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktrsr\" (UniqueName: \"kubernetes.io/projected/fc432eb8-8449-4f05-9441-18726488a595-kube-api-access-ktrsr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-46dr5\" (UID: \"fc432eb8-8449-4f05-9441-18726488a595\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.957416 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957567 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957648 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:32.457614022 +0000 UTC m=+936.269783043 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957737 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957812 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. 
No retries permitted until 2026-01-22 16:20:32.457787486 +0000 UTC m=+936.269956717 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957902 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: E0122 16:20:31.957936 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:32.95792693 +0000 UTC m=+936.770095961 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.974487 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktrsr\" (UniqueName: \"kubernetes.io/projected/fc432eb8-8449-4f05-9441-18726488a595-kube-api-access-ktrsr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-46dr5\" (UID: \"fc432eb8-8449-4f05-9441-18726488a595\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" Jan 22 16:20:31 crc kubenswrapper[5032]: I0122 16:20:31.983016 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhzlq\" (UniqueName: \"kubernetes.io/projected/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-kube-api-access-zhzlq\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.011613 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.048355 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.069755 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.120159 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.150589 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.173692 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.357991 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.370873 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.377357 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq"] Jan 22 16:20:32 crc kubenswrapper[5032]: W0122 16:20:32.380093 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode558895e_9fa5_46dc_a852_2b876aa9d8f3.slice/crio-b5b643214fa137ee613f72d98a9bca321697c37dc88ced3305a20873a3052e1d WatchSource:0}: Error finding container b5b643214fa137ee613f72d98a9bca321697c37dc88ced3305a20873a3052e1d: Status 404 returned error can't find the container with id b5b643214fa137ee613f72d98a9bca321697c37dc88ced3305a20873a3052e1d Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.392632 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh"] Jan 22 16:20:32 crc kubenswrapper[5032]: W0122 16:20:32.393273 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode09867e2_02a7_4b8d_a388_78cb4a00b687.slice/crio-658f0716be79e77f3dce4c2147a80046f1c7ab80f0df86331ac665bf85c00e8a WatchSource:0}: Error finding container 658f0716be79e77f3dce4c2147a80046f1c7ab80f0df86331ac665bf85c00e8a: Status 404 returned error can't find the container with id 658f0716be79e77f3dce4c2147a80046f1c7ab80f0df86331ac665bf85c00e8a Jan 22 16:20:32 crc kubenswrapper[5032]: W0122 16:20:32.395092 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0a92c7d_3eb6_4d18_a03c_82b380ba29d9.slice/crio-2fb9a0309e680b1b6c78023eed42fb9ed1d2db2e379262f149b409bd4a156db1 WatchSource:0}: Error finding container 2fb9a0309e680b1b6c78023eed42fb9ed1d2db2e379262f149b409bd4a156db1: Status 404 returned error can't find the container with id 2fb9a0309e680b1b6c78023eed42fb9ed1d2db2e379262f149b409bd4a156db1 Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.398768 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.469346 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.469417 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " 
pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.469530 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.469571 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:33.469556852 +0000 UTC m=+937.281725883 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.469607 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.469627 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:33.469621913 +0000 UTC m=+937.281790934 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.672403 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.672539 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.672583 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:34.6725691 +0000 UTC m=+938.484738131 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.822000 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.833643 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.840782 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.843617 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" event={"ID":"b0a92c7d-3eb6-4d18-a03c-82b380ba29d9","Type":"ContainerStarted","Data":"2fb9a0309e680b1b6c78023eed42fb9ed1d2db2e379262f149b409bd4a156db1"} Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.856705 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.858882 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" event={"ID":"a4c62d00-dba9-4309-9588-e5cb36dd675f","Type":"ContainerStarted","Data":"bc58c8d6a05cd88d00c04712a5f662093ecc18a34a22b51e2bd69610031ef72e"} Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.866200 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.868031 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" event={"ID":"57b66df0-2faf-4f1a-980c-55094e9abda4","Type":"ContainerStarted","Data":"74f37d75367088ae0917a7a874d3dc77c296e41b1bafd416b66b17024354a035"} Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.881158 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" event={"ID":"f7d15cd8-d231-493f-ace5-b52e78dff983","Type":"ContainerStarted","Data":"694795d0ca89745aaa498305d71f1554808b03634dcadb1779723c25a6af82c8"} Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.901925 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: 
{{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9nltt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6b8bc8d87d-7xr7k_openstack-operators(c4713406-4a5e-4bfd-9a06-3c233d2875f4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.902807 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jhrxd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-g98vr_openstack-operators(739bb968-5cd4-460a-a812-dba945e8a695): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: W0122 16:20:32.903999 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff38f03d_52c0_42dc_85e9_9a67709d0d74.slice/crio-a6ee9fb3c5708a36d04c89cbcfc46718f69e5cce71ec60f041c72c1a0c46c0fb WatchSource:0}: Error finding container a6ee9fb3c5708a36d04c89cbcfc46718f69e5cce71ec60f041c72c1a0c46c0fb: Status 404 returned error can't find the container with id a6ee9fb3c5708a36d04c89cbcfc46718f69e5cce71ec60f041c72c1a0c46c0fb Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.904115 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" event={"ID":"e09867e2-02a7-4b8d-a388-78cb4a00b687","Type":"ContainerStarted","Data":"658f0716be79e77f3dce4c2147a80046f1c7ab80f0df86331ac665bf85c00e8a"} Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.904216 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" podUID="c4713406-4a5e-4bfd-9a06-3c233d2875f4" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.904261 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" podUID="739bb968-5cd4-460a-a812-dba945e8a695" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.905630 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m6kck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7bd9774b6-4dtdf_openstack-operators(c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.906785 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" podUID="c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.907042 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-td7fr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5d646b7d76-5cxj6_openstack-operators(9d75008c-d650-41f9-9c0f-beceefae917f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.907301 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" event={"ID":"7f26e503-3b5f-4a4d-9b23-c8b742fef2a9","Type":"ContainerStarted","Data":"c86beaabfd56821207961a4b4140e09f11388a8677995ba21e48af81807c09ad"} Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.908156 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" podUID="9d75008c-d650-41f9-9c0f-beceefae917f" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.908673 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" event={"ID":"e558895e-9fa5-46dc-a852-2b876aa9d8f3","Type":"ContainerStarted","Data":"b5b643214fa137ee613f72d98a9bca321697c37dc88ced3305a20873a3052e1d"} Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.908982 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l77dc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-85cd9769bb-x8dvh_openstack-operators(ff38f03d-52c0-42dc-85e9-9a67709d0d74): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.909659 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ktrsr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-46dr5_openstack-operators(fc432eb8-8449-4f05-9441-18726488a595): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.910172 5032 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf"] Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.910255 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" podUID="ff38f03d-52c0-42dc-85e9-9a67709d0d74" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.910816 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" podUID="fc432eb8-8449-4f05-9441-18726488a595" Jan 22 16:20:32 crc kubenswrapper[5032]: W0122 16:20:32.919828 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f11f845_b9f4_4720_bddd_e5c56ef8a893.slice/crio-972f952629a1114fb22357d7b22f71a1918a6c5ad6a2739a4cfb0de7ac3a126d WatchSource:0}: Error finding container 972f952629a1114fb22357d7b22f71a1918a6c5ad6a2739a4cfb0de7ac3a126d: Status 404 returned error can't find the container with id 972f952629a1114fb22357d7b22f71a1918a6c5ad6a2739a4cfb0de7ac3a126d Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.921179 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hkldb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-w27st_openstack-operators(4f11f845-b9f4-4720-bddd-e5c56ef8a893): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.921273 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl"] Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.922498 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" podUID="4f11f845-b9f4-4720-bddd-e5c56ef8a893" Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.925778 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.930153 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.935082 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.940708 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.945230 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.948908 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.956014 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-w27st"] Jan 22 16:20:32 crc kubenswrapper[5032]: I0122 16:20:32.990103 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.990304 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:32 crc kubenswrapper[5032]: E0122 16:20:32.990376 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:34.99035779 +0000 UTC m=+938.802526821 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.017248 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.017306 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.499052 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.499251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.499472 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.499592 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:35.499560295 +0000 UTC m=+939.311729366 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.499961 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.500235 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:35.500204293 +0000 UTC m=+939.312373354 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.918484 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" event={"ID":"d7204606-7e41-46b7-bc8f-ffb0088f839e","Type":"ContainerStarted","Data":"e6181eaf3280f360481b4ee965482911f40b822d326b4effff821c20d6daeca9"} Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.920623 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" event={"ID":"fc432eb8-8449-4f05-9441-18726488a595","Type":"ContainerStarted","Data":"d8b436833d46c0af012e7ec828c987dacc2d3c47bbb91bdb94f0562b9fc273fc"} Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.921938 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" podUID="fc432eb8-8449-4f05-9441-18726488a595" Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.924269 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" event={"ID":"550ebf15-3945-4782-b3b0-c32a34743e65","Type":"ContainerStarted","Data":"39e89b1c4fdfff4358121872a8ec4655215201d4bada78f7eafaee9c4431848c"} Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.928224 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" event={"ID":"e7b1c714-1088-419e-9e6b-d553a73583a5","Type":"ContainerStarted","Data":"072300d04c30fbb21a43cd3f4f0b9940107ce9666b2493aaa01ab5fe7cf73a56"} Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.930737 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" event={"ID":"62453126-30f2-45a5-9b4d-d4844d39107d","Type":"ContainerStarted","Data":"deb3892e0539d1025d4c5343f26280f2f88d2d433b360c81ea31fb6a9a96cf4a"} Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.932294 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" event={"ID":"9d75008c-d650-41f9-9c0f-beceefae917f","Type":"ContainerStarted","Data":"69fffb6218f39fd8c40fc61fdb2f967bd4adbf937fae75a3669836059818ffa5"} Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.936141 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" event={"ID":"c4713406-4a5e-4bfd-9a06-3c233d2875f4","Type":"ContainerStarted","Data":"46dc0bfc74533a760b763581904bfef3d1da86cd8a025a23e4a12e5669a261c2"} Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.940686 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" podUID="9d75008c-d650-41f9-9c0f-beceefae917f" Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.942175 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" event={"ID":"8556c821-4b39-4898-8426-197ab92a52ec","Type":"ContainerStarted","Data":"95dc51612e025df73ca47c02adc63a5357b678b3d13c99c4b954dae4880bb6f8"} Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.943186 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" podUID="c4713406-4a5e-4bfd-9a06-3c233d2875f4" Jan 22 16:20:33 crc kubenswrapper[5032]: I0122 16:20:33.944618 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" event={"ID":"ff38f03d-52c0-42dc-85e9-9a67709d0d74","Type":"ContainerStarted","Data":"a6ee9fb3c5708a36d04c89cbcfc46718f69e5cce71ec60f041c72c1a0c46c0fb"} Jan 22 16:20:33 crc kubenswrapper[5032]: E0122 16:20:33.948705 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" podUID="ff38f03d-52c0-42dc-85e9-9a67709d0d74" Jan 22 16:20:34 crc kubenswrapper[5032]: I0122 16:20:34.723025 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:34 crc kubenswrapper[5032]: E0122 16:20:34.723275 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:34 crc kubenswrapper[5032]: E0122 16:20:34.723409 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert 
podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:38.723376549 +0000 UTC m=+942.535545610 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: I0122 16:20:35.027556 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.027771 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.027883 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:39.02783865 +0000 UTC m=+942.840007691 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: I0122 16:20:35.535096 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.535327 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: I0122 16:20:35.535645 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.535732 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:39.535702449 +0000 UTC m=+943.347871520 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.535983 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:35 crc kubenswrapper[5032]: E0122 16:20:35.536144 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:39.53609169 +0000 UTC m=+943.348260751 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:38 crc kubenswrapper[5032]: I0122 16:20:38.791943 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:38 crc kubenswrapper[5032]: E0122 16:20:38.792105 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:38 crc kubenswrapper[5032]: E0122 16:20:38.792585 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:46.792560204 +0000 UTC m=+950.604729235 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.098024 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.098209 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.098288 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:20:47.098270688 +0000 UTC m=+950.910439719 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.531062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" event={"ID":"739bb968-5cd4-460a-a812-dba945e8a695","Type":"ContainerStarted","Data":"c78f0a91dcd30cf89327c0cbe3dcb53fdca476bd319d80df1b445436e03a530a"} Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.532120 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" event={"ID":"4f11f845-b9f4-4720-bddd-e5c56ef8a893","Type":"ContainerStarted","Data":"972f952629a1114fb22357d7b22f71a1918a6c5ad6a2739a4cfb0de7ac3a126d"} Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.534339 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" event={"ID":"c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5","Type":"ContainerStarted","Data":"4490a09fe2c98c9134f799980140d435b674c348bf736ac91e7b5e8297ee4414"} Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.534741 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" podUID="739bb968-5cd4-460a-a812-dba945e8a695" Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.535896 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" podUID="4f11f845-b9f4-4720-bddd-e5c56ef8a893" Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.538322 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" podUID="c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5" Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.538353 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" event={"ID":"f4c46099-26f5-4475-976d-1852dd96b9f8","Type":"ContainerStarted","Data":"f045c68c6f7f1a30265d908912eb45c0ff29495f60e20fbd7358d783824513a5"} Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.606610 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " 
pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.606783 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.606850 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:47.606832087 +0000 UTC m=+951.419001118 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: I0122 16:20:39.606888 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.607510 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:39 crc kubenswrapper[5032]: E0122 16:20:39.607550 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:20:47.607541006 +0000 UTC m=+951.419710037 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.548782 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" podUID="4f11f845-b9f4-4720-bddd-e5c56ef8a893" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549179 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" podUID="c4713406-4a5e-4bfd-9a06-3c233d2875f4" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549306 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" podUID="c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549349 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" podUID="fc432eb8-8449-4f05-9441-18726488a595" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549397 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" podUID="ff38f03d-52c0-42dc-85e9-9a67709d0d74" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549445 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" podUID="739bb968-5cd4-460a-a812-dba945e8a695" Jan 22 16:20:40 crc kubenswrapper[5032]: E0122 16:20:40.549482 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" 
pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" podUID="9d75008c-d650-41f9-9c0f-beceefae917f" Jan 22 16:20:46 crc kubenswrapper[5032]: I0122 16:20:46.891554 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:20:46 crc kubenswrapper[5032]: E0122 16:20:46.891793 5032 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:46 crc kubenswrapper[5032]: E0122 16:20:46.892611 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert podName:59dad380-97a8-4d1a-9dd7-823d8be84b09 nodeName:}" failed. No retries permitted until 2026-01-22 16:21:02.892583712 +0000 UTC m=+966.704752753 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert") pod "infra-operator-controller-manager-54ccf4f85d-42r27" (UID: "59dad380-97a8-4d1a-9dd7-823d8be84b09") : secret "infra-operator-webhook-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: I0122 16:20:47.196264 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.196426 5032 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.196483 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert podName:f9abaa76-29ed-4f41-9e98-0d6852a98e01 nodeName:}" failed. No retries permitted until 2026-01-22 16:21:03.196465737 +0000 UTC m=+967.008634768 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" (UID: "f9abaa76-29ed-4f41-9e98-0d6852a98e01") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: I0122 16:20:47.706627 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:47 crc kubenswrapper[5032]: I0122 16:20:47.706744 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.706816 5032 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.706923 5032 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.706964 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:21:03.706937497 +0000 UTC m=+967.519106578 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "webhook-server-cert" not found Jan 22 16:20:47 crc kubenswrapper[5032]: E0122 16:20:47.706998 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs podName:ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb nodeName:}" failed. No retries permitted until 2026-01-22 16:21:03.706981958 +0000 UTC m=+967.519151029 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs") pod "openstack-operator-controller-manager-7f64d4d54c-p884k" (UID: "ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb") : secret "metrics-server-cert" not found Jan 22 16:20:50 crc kubenswrapper[5032]: E0122 16:20:50.904891 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822" Jan 22 16:20:50 crc kubenswrapper[5032]: E0122 16:20:50.905486 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hj9zp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-77d5c5b54f-jtvgl_openstack-operators(f7d15cd8-d231-493f-ace5-b52e78dff983): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:50 crc kubenswrapper[5032]: E0122 16:20:50.906698 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" 
podUID="f7d15cd8-d231-493f-ace5-b52e78dff983" Jan 22 16:20:51 crc kubenswrapper[5032]: E0122 16:20:51.633520 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" podUID="f7d15cd8-d231-493f-ace5-b52e78dff983" Jan 22 16:20:52 crc kubenswrapper[5032]: E0122 16:20:52.061691 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 22 16:20:52 crc kubenswrapper[5032]: E0122 16:20:52.062617 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4wxj7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-jtrqh_openstack-operators(b0a92c7d-3eb6-4d18-a03c-82b380ba29d9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:52 crc kubenswrapper[5032]: E0122 16:20:52.064208 5032 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" podUID="b0a92c7d-3eb6-4d18-a03c-82b380ba29d9" Jan 22 16:20:52 crc kubenswrapper[5032]: E0122 16:20:52.641269 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" podUID="b0a92c7d-3eb6-4d18-a03c-82b380ba29d9" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.135770 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.136092 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n5t7h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-69d6c9f5b8-46xgl_openstack-operators(550ebf15-3945-4782-b3b0-c32a34743e65): ErrImagePull: 
rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.137346 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" podUID="550ebf15-3945-4782-b3b0-c32a34743e65" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.639101 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.639290 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jcvtz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-78fdd796fd-9r28s_openstack-operators(e09867e2-02a7-4b8d-a388-78cb4a00b687): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.641045 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" podUID="e09867e2-02a7-4b8d-a388-78cb4a00b687" Jan 22 16:20:53 crc kubenswrapper[5032]: E0122 16:20:53.649260 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" podUID="550ebf15-3945-4782-b3b0-c32a34743e65" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.230035 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.230213 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-95pkt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-69cf5d4557-k9425_openstack-operators(57b66df0-2faf-4f1a-980c-55094e9abda4): ErrImagePull: rpc error: code = Canceled desc = 
copying config: context canceled" logger="UnhandledError" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.232897 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" podUID="57b66df0-2faf-4f1a-980c-55094e9abda4" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.654444 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337\\\"\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" podUID="e09867e2-02a7-4b8d-a388-78cb4a00b687" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.655067 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" podUID="57b66df0-2faf-4f1a-980c-55094e9abda4" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.998781 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 22 16:20:54 crc kubenswrapper[5032]: E0122 16:20:54.999378 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kqhwd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-cqbtr_openstack-operators(e7b1c714-1088-419e-9e6b-d553a73583a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:55 crc kubenswrapper[5032]: E0122 16:20:55.000735 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" podUID="e7b1c714-1088-419e-9e6b-d553a73583a5" Jan 22 16:20:55 crc kubenswrapper[5032]: E0122 16:20:55.661056 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" podUID="e7b1c714-1088-419e-9e6b-d553a73583a5" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.117119 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.117530 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7tw66,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-nqhbr_openstack-operators(62453126-30f2-45a5-9b4d-d4844d39107d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.118733 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" podUID="62453126-30f2-45a5-9b4d-d4844d39107d" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.684519 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" podUID="62453126-30f2-45a5-9b4d-d4844d39107d" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.744330 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.744582 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qjnjb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-b45d7bf98-4gkbq_openstack-operators(e558895e-9fa5-46dc-a852-2b876aa9d8f3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:57 crc kubenswrapper[5032]: E0122 16:20:57.746363 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" podUID="e558895e-9fa5-46dc-a852-2b876aa9d8f3" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.382837 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.383298 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t8nx4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-59dd8b7cbf-wzh72_openstack-operators(7f26e503-3b5f-4a4d-9b23-c8b742fef2a9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.384779 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" podUID="7f26e503-3b5f-4a4d-9b23-c8b742fef2a9" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.692709 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" podUID="7f26e503-3b5f-4a4d-9b23-c8b742fef2a9" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.693002 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece\\\"\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" podUID="e558895e-9fa5-46dc-a852-2b876aa9d8f3" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.900849 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.901041 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qw9q6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-c87fff755-28l4v_openstack-operators(f4c46099-26f5-4475-976d-1852dd96b9f8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:58 crc kubenswrapper[5032]: E0122 16:20:58.902365 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" podUID="f4c46099-26f5-4475-976d-1852dd96b9f8" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.456697 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.456851 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-97qgb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-78c6999f6f-th8gj_openstack-operators(d7204606-7e41-46b7-bc8f-ffb0088f839e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.458112 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" podUID="d7204606-7e41-46b7-bc8f-ffb0088f839e" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.699196 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" podUID="f4c46099-26f5-4475-976d-1852dd96b9f8" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.700281 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" 
podUID="d7204606-7e41-46b7-bc8f-ffb0088f839e" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.991566 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/neutron-operator:ee35d45e2b7d36f4be2f39423542f378a292b175" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.991629 5032 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/neutron-operator:ee35d45e2b7d36f4be2f39423542f378a292b175" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.991783 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.113:5001/openstack-k8s-operators/neutron-operator:ee35d45e2b7d36f4be2f39423542f378a292b175,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gz22v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-8597d6d6d5-2wsj8_openstack-operators(8556c821-4b39-4898-8426-197ab92a52ec): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:20:59 crc kubenswrapper[5032]: E0122 16:20:59.993057 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" podUID="8556c821-4b39-4898-8426-197ab92a52ec" Jan 22 16:21:00 crc kubenswrapper[5032]: E0122 16:21:00.702822 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.113:5001/openstack-k8s-operators/neutron-operator:ee35d45e2b7d36f4be2f39423542f378a292b175\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" podUID="8556c821-4b39-4898-8426-197ab92a52ec" Jan 22 16:21:02 crc kubenswrapper[5032]: I0122 16:21:02.951600 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:21:02 crc kubenswrapper[5032]: I0122 16:21:02.959499 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/59dad380-97a8-4d1a-9dd7-823d8be84b09-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-42r27\" (UID: \"59dad380-97a8-4d1a-9dd7-823d8be84b09\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.016836 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.017247 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.046704 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-5q7ms" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.055895 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.255517 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.260615 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f9abaa76-29ed-4f41-9e98-0d6852a98e01-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8\" (UID: \"f9abaa76-29ed-4f41-9e98-0d6852a98e01\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.357820 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-gmd5j" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.365851 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.761760 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.761926 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.769054 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-metrics-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.770110 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb-webhook-certs\") pod \"openstack-operator-controller-manager-7f64d4d54c-p884k\" (UID: \"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb\") " pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.961538 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-v8gnm" Jan 22 16:21:03 crc kubenswrapper[5032]: I0122 16:21:03.970624 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.457057 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.584539 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k"] Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.723893 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27"] Jan 22 16:21:05 crc kubenswrapper[5032]: W0122 16:21:05.728987 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59dad380_97a8_4d1a_9dd7_823d8be84b09.slice/crio-f101702d0231202f757a3ab475fa85bc7a35d6720cc86e5761774ffe15cf2e6c WatchSource:0}: Error finding container f101702d0231202f757a3ab475fa85bc7a35d6720cc86e5761774ffe15cf2e6c: Status 404 returned error can't find the container with id f101702d0231202f757a3ab475fa85bc7a35d6720cc86e5761774ffe15cf2e6c Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.738131 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8"] Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.759209 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" event={"ID":"739bb968-5cd4-460a-a812-dba945e8a695","Type":"ContainerStarted","Data":"04a5b959b30ba36e7a88f592ed10e68c78879745e02a1c986c5417a591e2d89f"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.759563 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.763166 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" event={"ID":"4f11f845-b9f4-4720-bddd-e5c56ef8a893","Type":"ContainerStarted","Data":"d78f5b850cd3dcf1b735f1e146b383cd34ed6d6d404f47e9c34d67e6948cd3fe"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.764137 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.777688 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" event={"ID":"59dad380-97a8-4d1a-9dd7-823d8be84b09","Type":"ContainerStarted","Data":"f101702d0231202f757a3ab475fa85bc7a35d6720cc86e5761774ffe15cf2e6c"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.809351 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" event={"ID":"c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5","Type":"ContainerStarted","Data":"4075f7cd604053434655cc737e2ec1551d20ce48573e10058297625ea97ef394"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.809941 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.821547 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" event={"ID":"c4713406-4a5e-4bfd-9a06-3c233d2875f4","Type":"ContainerStarted","Data":"78e90de7860409c9859b963afd7ea926ee0f9add6189bceaad15b0f1ef0ef13c"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.822752 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.828479 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" podStartSLOduration=2.606639539 podStartE2EDuration="34.828464167s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.921072383 +0000 UTC m=+936.733241414" lastFinishedPulling="2026-01-22 16:21:05.142897011 +0000 UTC m=+968.955066042" observedRunningTime="2026-01-22 16:21:05.826896295 +0000 UTC m=+969.639065336" watchObservedRunningTime="2026-01-22 16:21:05.828464167 +0000 UTC m=+969.640633198" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.829851 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" podStartSLOduration=2.634333026 podStartE2EDuration="34.829845395s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.902618566 +0000 UTC m=+936.714787597" lastFinishedPulling="2026-01-22 16:21:05.098130935 +0000 UTC m=+968.910299966" observedRunningTime="2026-01-22 16:21:05.809402124 +0000 UTC m=+969.621571165" watchObservedRunningTime="2026-01-22 16:21:05.829845395 +0000 UTC m=+969.642014426" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.832365 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" event={"ID":"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb","Type":"ContainerStarted","Data":"50bf053045fe6299d96ecf27834a7c0fdfc6e775d82eee003387bb8d33802679"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.848152 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" event={"ID":"ff38f03d-52c0-42dc-85e9-9a67709d0d74","Type":"ContainerStarted","Data":"dd0a488bb2dbebf656299c420ce000579557a5c1e9761c418698bb4ce074ed0d"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.849175 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.861232 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" event={"ID":"fc432eb8-8449-4f05-9441-18726488a595","Type":"ContainerStarted","Data":"0cef9e9958efe1fcba907d93d1166240333067dce995c4f7c8e79b83ed6f244c"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.867404 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" event={"ID":"a4c62d00-dba9-4309-9588-e5cb36dd675f","Type":"ContainerStarted","Data":"67e804b53e364c9826f085ca4422f18044ae837bec67d836ca605f99d84179e8"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.868080 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.869121 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" event={"ID":"9d75008c-d650-41f9-9c0f-beceefae917f","Type":"ContainerStarted","Data":"1266a9b97b40893d2aff7b35be7aae1f56744473d0fc6861b5b602b4d9817c0b"} Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.869492 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.904738 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" podStartSLOduration=3.632147447 podStartE2EDuration="35.904716041s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.901662751 +0000 UTC m=+936.713831772" lastFinishedPulling="2026-01-22 16:21:05.174231335 +0000 UTC m=+968.986400366" observedRunningTime="2026-01-22 16:21:05.894901677 +0000 UTC m=+969.707070708" watchObservedRunningTime="2026-01-22 16:21:05.904716041 +0000 UTC m=+969.716885072" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.906285 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" podStartSLOduration=2.668761462 podStartE2EDuration="34.906277843s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.905397831 +0000 UTC m=+936.717566862" lastFinishedPulling="2026-01-22 16:21:05.142914212 +0000 UTC m=+968.955083243" observedRunningTime="2026-01-22 16:21:05.866631195 +0000 UTC m=+969.678800226" watchObservedRunningTime="2026-01-22 16:21:05.906277843 +0000 UTC m=+969.718446874" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.917966 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-46dr5" podStartSLOduration=2.6539720449999997 podStartE2EDuration="34.917951818s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.909553233 +0000 UTC m=+936.721722264" lastFinishedPulling="2026-01-22 16:21:05.173533006 +0000 UTC m=+968.985702037" observedRunningTime="2026-01-22 16:21:05.913838817 +0000 UTC m=+969.726007848" watchObservedRunningTime="2026-01-22 16:21:05.917951818 +0000 UTC m=+969.730120849" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.991245 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" podStartSLOduration=3.254328816 podStartE2EDuration="34.991225272s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.906649095 +0000 UTC m=+936.718818166" lastFinishedPulling="2026-01-22 16:21:04.643545591 +0000 UTC m=+968.455714622" observedRunningTime="2026-01-22 16:21:05.987185533 +0000 UTC m=+969.799354564" watchObservedRunningTime="2026-01-22 16:21:05.991225272 +0000 UTC m=+969.803394303" Jan 22 16:21:05 crc kubenswrapper[5032]: I0122 16:21:05.992918 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" podStartSLOduration=2.8036188060000002 
podStartE2EDuration="34.992909627s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.908802253 +0000 UTC m=+936.720971284" lastFinishedPulling="2026-01-22 16:21:05.098093074 +0000 UTC m=+968.910262105" observedRunningTime="2026-01-22 16:21:05.940259569 +0000 UTC m=+969.752428600" watchObservedRunningTime="2026-01-22 16:21:05.992909627 +0000 UTC m=+969.805078658" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.008338 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" podStartSLOduration=6.718751051 podStartE2EDuration="35.008322712s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.831025078 +0000 UTC m=+936.643194099" lastFinishedPulling="2026-01-22 16:21:01.120596729 +0000 UTC m=+964.932765760" observedRunningTime="2026-01-22 16:21:06.004003106 +0000 UTC m=+969.816172147" watchObservedRunningTime="2026-01-22 16:21:06.008322712 +0000 UTC m=+969.820491743" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.895181 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" event={"ID":"b0a92c7d-3eb6-4d18-a03c-82b380ba29d9","Type":"ContainerStarted","Data":"3614df10b2852725913e901ff8746ffef415fa22cda9c80ae0987cb014ab8846"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.896533 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.898177 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" event={"ID":"ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb","Type":"ContainerStarted","Data":"753128f31ef5bff5fd1ec209004631abd73756679164424d4b52615f803b8845"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.898553 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.903240 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" event={"ID":"e09867e2-02a7-4b8d-a388-78cb4a00b687","Type":"ContainerStarted","Data":"5694fccd2e6ec6f16bf6301714aff9cee01c6f53e87cc9a14561d37155c85065"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.903553 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.907179 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" event={"ID":"550ebf15-3945-4782-b3b0-c32a34743e65","Type":"ContainerStarted","Data":"e3ca8d5cce4f93d4cc576eab24b066acf1e8c263c43259a5903d3061ad659c83"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.907485 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.908958 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" 
event={"ID":"f9abaa76-29ed-4f41-9e98-0d6852a98e01","Type":"ContainerStarted","Data":"90d55998a14cc39205dfb420ceac7fcca009c9cb06863ce378e5abd5b8dc953d"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.910459 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" event={"ID":"f7d15cd8-d231-493f-ace5-b52e78dff983","Type":"ContainerStarted","Data":"38e85624bcab3d7698c5641184192d6cceeba48c1d82b8285916b9dc2b1e5f72"} Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.910750 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.923996 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" podStartSLOduration=3.40141471 podStartE2EDuration="36.923962744s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.400719307 +0000 UTC m=+936.212888348" lastFinishedPulling="2026-01-22 16:21:05.923267351 +0000 UTC m=+969.735436382" observedRunningTime="2026-01-22 16:21:06.917149041 +0000 UTC m=+970.729318072" watchObservedRunningTime="2026-01-22 16:21:06.923962744 +0000 UTC m=+970.736131785" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.947745 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" podStartSLOduration=3.7476964390000003 podStartE2EDuration="36.947727525s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.881540599 +0000 UTC m=+936.693709620" lastFinishedPulling="2026-01-22 16:21:06.081571675 +0000 UTC m=+969.893740706" observedRunningTime="2026-01-22 16:21:06.9419768 +0000 UTC m=+970.754145831" watchObservedRunningTime="2026-01-22 16:21:06.947727525 +0000 UTC m=+970.759896556" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.978702 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" podStartSLOduration=35.978685368 podStartE2EDuration="35.978685368s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:21:06.973360115 +0000 UTC m=+970.785529146" watchObservedRunningTime="2026-01-22 16:21:06.978685368 +0000 UTC m=+970.790854399" Jan 22 16:21:06 crc kubenswrapper[5032]: I0122 16:21:06.999070 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" podStartSLOduration=3.480299466 podStartE2EDuration="36.999055107s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.401324644 +0000 UTC m=+936.213493675" lastFinishedPulling="2026-01-22 16:21:05.920080285 +0000 UTC m=+969.732249316" observedRunningTime="2026-01-22 16:21:06.996288013 +0000 UTC m=+970.808457044" watchObservedRunningTime="2026-01-22 16:21:06.999055107 +0000 UTC m=+970.811224128" Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.474733 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" podStartSLOduration=6.831937593 
podStartE2EDuration="40.474713215s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.396814372 +0000 UTC m=+936.208983423" lastFinishedPulling="2026-01-22 16:21:06.039590014 +0000 UTC m=+969.851759045" observedRunningTime="2026-01-22 16:21:07.015833469 +0000 UTC m=+970.828002500" watchObservedRunningTime="2026-01-22 16:21:10.474713215 +0000 UTC m=+974.286882246" Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.945990 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" event={"ID":"59dad380-97a8-4d1a-9dd7-823d8be84b09","Type":"ContainerStarted","Data":"e86641b2a447ff3fa3c08ecd1626deb30498c0dc430ea64ad4d50edbdeac27eb"} Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.946084 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.948651 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" event={"ID":"f9abaa76-29ed-4f41-9e98-0d6852a98e01","Type":"ContainerStarted","Data":"ac408e7d62408a2b3de26b84f6d9dbba74b62cb5c8006c20f11546b0c82701a6"} Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.949076 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.962408 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" podStartSLOduration=36.503712973 podStartE2EDuration="40.962375208s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:21:05.734072045 +0000 UTC m=+969.546241076" lastFinishedPulling="2026-01-22 16:21:10.19273428 +0000 UTC m=+974.004903311" observedRunningTime="2026-01-22 16:21:10.962111971 +0000 UTC m=+974.774281042" watchObservedRunningTime="2026-01-22 16:21:10.962375208 +0000 UTC m=+974.774544269" Jan 22 16:21:10 crc kubenswrapper[5032]: I0122 16:21:10.998805 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" podStartSLOduration=35.602356623 podStartE2EDuration="39.998778441s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:21:05.777503385 +0000 UTC m=+969.589672416" lastFinishedPulling="2026-01-22 16:21:10.173925203 +0000 UTC m=+973.986094234" observedRunningTime="2026-01-22 16:21:10.99463069 +0000 UTC m=+974.806799761" watchObservedRunningTime="2026-01-22 16:21:10.998778441 +0000 UTC m=+974.810947472" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.188313 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-jtrqh" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.238304 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-jtvgl" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.345939 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-46xgl" Jan 22 16:21:11 crc 
kubenswrapper[5032]: I0122 16:21:11.477469 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-9r28s" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.530442 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-7xr7k" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.541401 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-4dtdf" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.597921 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-ljq9b" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.693144 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-5cxj6" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.956434 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" event={"ID":"7f26e503-3b5f-4a4d-9b23-c8b742fef2a9","Type":"ContainerStarted","Data":"48e53efca6b99e72191130d97d8da3137d564364fd07d52541536900a747d51b"} Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.956645 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.957983 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" event={"ID":"57b66df0-2faf-4f1a-980c-55094e9abda4","Type":"ContainerStarted","Data":"a59470b72b2f120aeef59a6460a460169c5c25c6b6df567ca76997f954717353"} Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.975733 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" podStartSLOduration=2.947783709 podStartE2EDuration="41.975716847s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.087894721 +0000 UTC m=+935.900063752" lastFinishedPulling="2026-01-22 16:21:11.115827859 +0000 UTC m=+974.927996890" observedRunningTime="2026-01-22 16:21:11.973778705 +0000 UTC m=+975.785947766" watchObservedRunningTime="2026-01-22 16:21:11.975716847 +0000 UTC m=+975.787885878" Jan 22 16:21:11 crc kubenswrapper[5032]: I0122 16:21:11.993065 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" podStartSLOduration=3.248861836 podStartE2EDuration="41.99303567s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.371594533 +0000 UTC m=+936.183763564" lastFinishedPulling="2026-01-22 16:21:11.115768367 +0000 UTC m=+974.927937398" observedRunningTime="2026-01-22 16:21:11.990122122 +0000 UTC m=+975.802291153" watchObservedRunningTime="2026-01-22 16:21:11.99303567 +0000 UTC m=+975.805204731" Jan 22 16:21:12 crc kubenswrapper[5032]: I0122 16:21:12.052110 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-x8dvh" Jan 22 16:21:12 crc kubenswrapper[5032]: I0122 
16:21:12.124983 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-w27st" Jan 22 16:21:12 crc kubenswrapper[5032]: I0122 16:21:12.157363 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-g98vr" Jan 22 16:21:13 crc kubenswrapper[5032]: I0122 16:21:13.978692 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7f64d4d54c-p884k" Jan 22 16:21:21 crc kubenswrapper[5032]: I0122 16:21:21.075543 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wzh72" Jan 22 16:21:21 crc kubenswrapper[5032]: I0122 16:21:21.104835 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:21:21 crc kubenswrapper[5032]: I0122 16:21:21.108600 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-k9425" Jan 22 16:21:23 crc kubenswrapper[5032]: I0122 16:21:23.065820 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-42r27" Jan 22 16:21:23 crc kubenswrapper[5032]: I0122 16:21:23.373408 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.570360 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" event={"ID":"e558895e-9fa5-46dc-a852-2b876aa9d8f3","Type":"ContainerStarted","Data":"828b7182d86ba0d9630ecc937af1f8e90c59d31acb125274e092e524f749947e"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.571180 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.572663 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" event={"ID":"f4c46099-26f5-4475-976d-1852dd96b9f8","Type":"ContainerStarted","Data":"214e60d69a46d4fa5e7bd9c3f778a45c7913ebf0d5a4c9577e869872c6cb874f"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.572983 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.575303 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" event={"ID":"e7b1c714-1088-419e-9e6b-d553a73583a5","Type":"ContainerStarted","Data":"ba3512e0d786637576b0813c9bae3ccc4ec7d236665e11e260affb4434dc69dd"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.575479 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.583215 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" event={"ID":"62453126-30f2-45a5-9b4d-d4844d39107d","Type":"ContainerStarted","Data":"91d9b3f28816921f8bdfd25508f5e481154a8cb648944b0a37a9ed37f866c997"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.584012 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.596039 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" event={"ID":"d7204606-7e41-46b7-bc8f-ffb0088f839e","Type":"ContainerStarted","Data":"e761bf8c2275b0998b63bad3e5aa7e0c5d43f81c5f895206332ed88d0d21cd93"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.596760 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.599814 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" event={"ID":"8556c821-4b39-4898-8426-197ab92a52ec","Type":"ContainerStarted","Data":"58824868bc7ea682d1a763a7ca329baf3e999059f7a2bcfb2130994680daadcc"} Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.600340 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.608158 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" podStartSLOduration=3.50863768 podStartE2EDuration="59.60814294s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.383504314 +0000 UTC m=+936.195673345" lastFinishedPulling="2026-01-22 16:21:28.483009574 +0000 UTC m=+992.295178605" observedRunningTime="2026-01-22 16:21:29.604410971 +0000 UTC m=+993.416580002" watchObservedRunningTime="2026-01-22 16:21:29.60814294 +0000 UTC m=+993.420311971" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.618953 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" podStartSLOduration=3.555193853 podStartE2EDuration="58.618929059s" podCreationTimestamp="2026-01-22 16:20:31 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.882220977 +0000 UTC m=+936.694390008" lastFinishedPulling="2026-01-22 16:21:27.945956153 +0000 UTC m=+991.758125214" observedRunningTime="2026-01-22 16:21:29.617451859 +0000 UTC m=+993.429620890" watchObservedRunningTime="2026-01-22 16:21:29.618929059 +0000 UTC m=+993.431098090" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.645772 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" podStartSLOduration=4.45202307 podStartE2EDuration="59.645752125s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.858613661 +0000 UTC m=+936.670782712" lastFinishedPulling="2026-01-22 16:21:28.052342726 +0000 UTC m=+991.864511767" observedRunningTime="2026-01-22 16:21:29.639812187 +0000 UTC m=+993.451981218" watchObservedRunningTime="2026-01-22 16:21:29.645752125 +0000 UTC m=+993.457921156" Jan 22 16:21:29 crc 
kubenswrapper[5032]: I0122 16:21:29.668505 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" podStartSLOduration=4.087727925 podStartE2EDuration="59.668485303s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.901005223 +0000 UTC m=+936.713174254" lastFinishedPulling="2026-01-22 16:21:28.481762601 +0000 UTC m=+992.293931632" observedRunningTime="2026-01-22 16:21:29.661182678 +0000 UTC m=+993.473351729" watchObservedRunningTime="2026-01-22 16:21:29.668485303 +0000 UTC m=+993.480654344" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.688497 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" podStartSLOduration=4.073859727 podStartE2EDuration="59.688478367s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.867969723 +0000 UTC m=+936.680138744" lastFinishedPulling="2026-01-22 16:21:28.482588353 +0000 UTC m=+992.294757384" observedRunningTime="2026-01-22 16:21:29.685676372 +0000 UTC m=+993.497845423" watchObservedRunningTime="2026-01-22 16:21:29.688478367 +0000 UTC m=+993.500647398" Jan 22 16:21:29 crc kubenswrapper[5032]: I0122 16:21:29.713154 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" podStartSLOduration=4.603511005 podStartE2EDuration="59.713137746s" podCreationTimestamp="2026-01-22 16:20:30 +0000 UTC" firstStartedPulling="2026-01-22 16:20:32.840481383 +0000 UTC m=+936.652650414" lastFinishedPulling="2026-01-22 16:21:27.950108114 +0000 UTC m=+991.762277155" observedRunningTime="2026-01-22 16:21:29.708555194 +0000 UTC m=+993.520724225" watchObservedRunningTime="2026-01-22 16:21:29.713137746 +0000 UTC m=+993.525306777" Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.016801 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.019381 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.019649 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.020873 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.021249 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f" gracePeriod=600 Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.633922 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f" exitCode=0 Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.633993 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f"} Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.634313 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871"} Jan 22 16:21:33 crc kubenswrapper[5032]: I0122 16:21:33.634336 5032 scope.go:117] "RemoveContainer" containerID="d1c2cb7c0f9349c17ae9e8e3a56654bf423570a8cdc5ea89c7a9b6e4747682cd" Jan 22 16:21:41 crc kubenswrapper[5032]: I0122 16:21:41.116652 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-4gkbq" Jan 22 16:21:41 crc kubenswrapper[5032]: I0122 16:21:41.389110 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-nqhbr" Jan 22 16:21:41 crc kubenswrapper[5032]: I0122 16:21:41.591673 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-th8gj" Jan 22 16:21:41 crc kubenswrapper[5032]: I0122 16:21:41.635722 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-28l4v" Jan 22 16:21:41 crc kubenswrapper[5032]: I0122 16:21:41.691282 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-8597d6d6d5-2wsj8" Jan 22 16:21:42 crc kubenswrapper[5032]: I0122 16:21:42.014494 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cqbtr" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.884504 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.889917 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.899316 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-64785" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.899728 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.899840 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.900105 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.915953 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.959326 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.966972 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.968900 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 22 16:21:57 crc kubenswrapper[5032]: I0122 16:21:57.969105 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.067840 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.068116 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkc89\" (UniqueName: \"kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.068161 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.068187 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9h26\" (UniqueName: \"kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.068246 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " 
pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.170086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.170164 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9h26\" (UniqueName: \"kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.170243 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.170292 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.170372 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkc89\" (UniqueName: \"kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.171401 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.171634 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.171998 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.191694 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkc89\" (UniqueName: \"kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89\") pod \"dnsmasq-dns-675f4bcbfc-g7w74\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.191728 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n9h26\" (UniqueName: \"kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26\") pod \"dnsmasq-dns-78dd6ddcc-829k5\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.225114 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.294700 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.503650 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.591953 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:21:58 crc kubenswrapper[5032]: W0122 16:21:58.597562 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac662245_f579_42a6_b40d_89640103cd3c.slice/crio-25d656f9aa64a93f7d1154c268004970d2b353a90d5b9360ab16271733ebb4ec WatchSource:0}: Error finding container 25d656f9aa64a93f7d1154c268004970d2b353a90d5b9360ab16271733ebb4ec: Status 404 returned error can't find the container with id 25d656f9aa64a93f7d1154c268004970d2b353a90d5b9360ab16271733ebb4ec Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.833604 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" event={"ID":"6cbba5f7-916d-4ba9-8c15-f84cd472f36d","Type":"ContainerStarted","Data":"fef669d6c26002d928f115c815876a7c17d6ad25c655696f3cfae3578940da4e"} Jan 22 16:21:58 crc kubenswrapper[5032]: I0122 16:21:58.835157 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" event={"ID":"ac662245-f579-42a6-b40d-89640103cd3c","Type":"ContainerStarted","Data":"25d656f9aa64a93f7d1154c268004970d2b353a90d5b9360ab16271733ebb4ec"} Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.711959 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.745662 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.746723 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.764315 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.821656 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.821731 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.822266 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trqjj\" (UniqueName: \"kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.924113 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trqjj\" (UniqueName: \"kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.924218 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.924257 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.925427 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.925486 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:00 crc kubenswrapper[5032]: I0122 16:22:00.948002 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trqjj\" (UniqueName: 
\"kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj\") pod \"dnsmasq-dns-666b6646f7-zrvjp\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.075486 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.110488 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.143951 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.145293 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.154695 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.339170 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54ctz\" (UniqueName: \"kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.339703 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.339733 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.444106 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54ctz\" (UniqueName: \"kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.444239 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.444299 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.446631 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.449311 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.484613 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54ctz\" (UniqueName: \"kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz\") pod \"dnsmasq-dns-57d769cc4f-c55lx\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.533744 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.767888 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.872223 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" event={"ID":"ee318086-45e9-4f61-bc62-3ac4024e2635","Type":"ContainerStarted","Data":"92fdf1fe1f570a1f114c5d85c6bb07a4b6dbad6561340846297e8e9139a10d36"} Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.947982 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.949515 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.956427 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.960270 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.960573 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.960946 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-46qls" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.961142 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.961282 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.962333 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 16:22:01 crc kubenswrapper[5032]: I0122 16:22:01.962508 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068260 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068477 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8q2f\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068671 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068704 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068757 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.068827 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.069022 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.069120 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.069154 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.069193 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.069391 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.074978 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170814 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170874 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170893 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170911 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170940 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170959 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.170994 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8q2f\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.171033 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.171047 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.171065 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.171086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.173574 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.175673 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.176457 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.177422 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.177766 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.178504 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.194150 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.194519 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.194165 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.195019 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.204052 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8q2f\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.225779 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 
crc kubenswrapper[5032]: I0122 16:22:02.259551 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.260573 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.263399 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gskcv" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.263852 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.264016 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.264077 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.264076 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.264186 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.264897 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.282734 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.300174 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376118 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376278 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376347 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376407 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376427 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.376471 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.378198 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.378274 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.378301 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.378378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtk9n\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.378624 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.480890 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.480939 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.480957 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.480979 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtk9n\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481014 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481038 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481056 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: 
I0122 16:22:02.481086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481139 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481154 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.481183 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.483928 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.484555 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.485888 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.486195 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.487226 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.487698 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.489789 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.490397 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.493208 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.494298 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.513256 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtk9n\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.516139 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.589266 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.897150 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" event={"ID":"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2","Type":"ContainerStarted","Data":"b214ff1a7946d73b19742e2196e45f0b009919f3405bf697f62e4ac1be99f1e7"} Jan 22 16:22:02 crc kubenswrapper[5032]: I0122 16:22:02.900985 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.075832 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.585665 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.587812 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.594446 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-2mx7d" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.594787 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.594885 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.594948 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.598492 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.606192 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700500 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700537 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700556 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-config-data-default\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700603 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xww5m\" (UniqueName: \"kubernetes.io/projected/45c951d2-5c76-4a36-a53c-47559731095e-kube-api-access-xww5m\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700692 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.700766 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-kolla-config\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.701026 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/45c951d2-5c76-4a36-a53c-47559731095e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.701203 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.803588 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.803867 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-config-data-default\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.803962 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xww5m\" (UniqueName: \"kubernetes.io/projected/45c951d2-5c76-4a36-a53c-47559731095e-kube-api-access-xww5m\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.803993 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.804017 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-kolla-config\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.804053 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/45c951d2-5c76-4a36-a53c-47559731095e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.804088 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.804117 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-galera-tls-certs\") pod 
\"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.804595 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.805353 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/45c951d2-5c76-4a36-a53c-47559731095e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.805397 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-kolla-config\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.806257 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-config-data-default\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.807366 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c951d2-5c76-4a36-a53c-47559731095e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.812715 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.815660 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c951d2-5c76-4a36-a53c-47559731095e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.827582 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.836463 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xww5m\" (UniqueName: \"kubernetes.io/projected/45c951d2-5c76-4a36-a53c-47559731095e-kube-api-access-xww5m\") pod \"openstack-galera-0\" (UID: \"45c951d2-5c76-4a36-a53c-47559731095e\") " pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.918181 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.939365 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerStarted","Data":"b41e42ee33612162f8ca368f621b9b93c6a71cd227ab4a6ecd2dadcb86aeccda"} Jan 22 16:22:03 crc kubenswrapper[5032]: I0122 16:22:03.941026 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerStarted","Data":"19eff6f7ac235b07d05807e0b7a2b0ee3d992fdf3a8eba6e85788ebb0e6106c0"} Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.862312 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.863492 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.876359 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.876645 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.879509 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.879839 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-9r544" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.897025 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.921981 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922034 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922111 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922140 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5z62\" (UniqueName: \"kubernetes.io/projected/3124207a-b471-4d1b-a15f-ce4191494ba2-kube-api-access-s5z62\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922158 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922333 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922422 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:04 crc kubenswrapper[5032]: I0122 16:22:04.922487 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023639 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023709 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023742 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023784 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023821 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5z62\" (UniqueName: \"kubernetes.io/projected/3124207a-b471-4d1b-a15f-ce4191494ba2-kube-api-access-s5z62\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023845 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023871 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.023908 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.024266 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.025047 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.026073 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.027607 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.028258 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3124207a-b471-4d1b-a15f-ce4191494ba2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.031533 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.034504 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3124207a-b471-4d1b-a15f-ce4191494ba2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.063407 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.063416 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5z62\" (UniqueName: \"kubernetes.io/projected/3124207a-b471-4d1b-a15f-ce4191494ba2-kube-api-access-s5z62\") pod \"openstack-cell1-galera-0\" (UID: \"3124207a-b471-4d1b-a15f-ce4191494ba2\") " pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.097768 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.098641 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.103504 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.103654 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-nbb85" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.103899 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.115381 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.203735 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.238319 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.238363 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-kolla-config\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.238379 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-config-data\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.238471 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.238505 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6j5n\" (UniqueName: \"kubernetes.io/projected/8122c1e4-e2c9-4d65-bd43-be04477fd674-kube-api-access-w6j5n\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.340470 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.340526 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6j5n\" (UniqueName: \"kubernetes.io/projected/8122c1e4-e2c9-4d65-bd43-be04477fd674-kube-api-access-w6j5n\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.340617 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.340637 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-kolla-config\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.340651 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-config-data\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.341683 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-config-data\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.341906 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8122c1e4-e2c9-4d65-bd43-be04477fd674-kolla-config\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.344663 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.344745 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8122c1e4-e2c9-4d65-bd43-be04477fd674-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.371358 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6j5n\" (UniqueName: \"kubernetes.io/projected/8122c1e4-e2c9-4d65-bd43-be04477fd674-kube-api-access-w6j5n\") pod \"memcached-0\" (UID: \"8122c1e4-e2c9-4d65-bd43-be04477fd674\") " pod="openstack/memcached-0" Jan 22 16:22:05 crc kubenswrapper[5032]: I0122 16:22:05.421350 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 16:22:06 crc kubenswrapper[5032]: I0122 16:22:06.939248 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:22:06 crc kubenswrapper[5032]: I0122 16:22:06.941381 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:22:06 crc kubenswrapper[5032]: I0122 16:22:06.945925 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gkvs9" Jan 22 16:22:06 crc kubenswrapper[5032]: I0122 16:22:06.949758 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:22:07 crc kubenswrapper[5032]: I0122 16:22:07.082720 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb4dk\" (UniqueName: \"kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk\") pod \"kube-state-metrics-0\" (UID: \"87fc46da-d927-4dff-9459-e5671ee0b0c4\") " pod="openstack/kube-state-metrics-0" Jan 22 16:22:07 crc kubenswrapper[5032]: I0122 16:22:07.185695 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb4dk\" (UniqueName: \"kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk\") pod \"kube-state-metrics-0\" (UID: \"87fc46da-d927-4dff-9459-e5671ee0b0c4\") " pod="openstack/kube-state-metrics-0" Jan 22 16:22:07 crc kubenswrapper[5032]: I0122 16:22:07.208788 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb4dk\" (UniqueName: \"kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk\") pod \"kube-state-metrics-0\" (UID: \"87fc46da-d927-4dff-9459-e5671ee0b0c4\") " pod="openstack/kube-state-metrics-0" Jan 22 16:22:07 crc kubenswrapper[5032]: I0122 16:22:07.283272 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.833979 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8rk6c"] Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.835457 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.837619 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.838224 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-glm86" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.838613 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.858702 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8rk6c"] Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.906341 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-7s9l4"] Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.908101 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.917405 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-7s9l4"] Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957322 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-etc-ovs\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957376 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-run\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957404 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-log-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957438 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qg2g\" (UniqueName: \"kubernetes.io/projected/468c8792-5087-4ced-909b-e36976aa235a-kube-api-access-9qg2g\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957608 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-ovn-controller-tls-certs\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957725 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957781 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-log\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957798 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-lib\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957847 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/b60d899c-ce17-4f38-97ea-533ae4e56597-scripts\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957893 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-combined-ca-bundle\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.957979 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.958156 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/468c8792-5087-4ced-909b-e36976aa235a-scripts\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:10 crc kubenswrapper[5032]: I0122 16:22:10.958259 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgnls\" (UniqueName: \"kubernetes.io/projected/b60d899c-ce17-4f38-97ea-533ae4e56597-kube-api-access-qgnls\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059375 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qg2g\" (UniqueName: \"kubernetes.io/projected/468c8792-5087-4ced-909b-e36976aa235a-kube-api-access-9qg2g\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059432 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-ovn-controller-tls-certs\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059470 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059501 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-log\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059522 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-lib\") pod \"ovn-controller-ovs-7s9l4\" 
(UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059556 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b60d899c-ce17-4f38-97ea-533ae4e56597-scripts\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059575 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-combined-ca-bundle\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059619 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059652 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/468c8792-5087-4ced-909b-e36976aa235a-scripts\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059689 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgnls\" (UniqueName: \"kubernetes.io/projected/b60d899c-ce17-4f38-97ea-533ae4e56597-kube-api-access-qgnls\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059715 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-etc-ovs\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059739 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-run\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.059755 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-log-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060309 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-log-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060400 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" 
(UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-etc-ovs\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060439 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-run\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060398 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run-ovn\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060461 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b60d899c-ce17-4f38-97ea-533ae4e56597-var-run\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060565 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-lib\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.060686 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/468c8792-5087-4ced-909b-e36976aa235a-var-log\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.062598 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b60d899c-ce17-4f38-97ea-533ae4e56597-scripts\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.063897 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/468c8792-5087-4ced-909b-e36976aa235a-scripts\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.065769 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-ovn-controller-tls-certs\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.065880 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b60d899c-ce17-4f38-97ea-533ae4e56597-combined-ca-bundle\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.075895 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgnls\" (UniqueName: \"kubernetes.io/projected/b60d899c-ce17-4f38-97ea-533ae4e56597-kube-api-access-qgnls\") pod \"ovn-controller-8rk6c\" (UID: \"b60d899c-ce17-4f38-97ea-533ae4e56597\") " pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.083390 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qg2g\" (UniqueName: \"kubernetes.io/projected/468c8792-5087-4ced-909b-e36976aa235a-kube-api-access-9qg2g\") pod \"ovn-controller-ovs-7s9l4\" (UID: \"468c8792-5087-4ced-909b-e36976aa235a\") " pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.152887 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.266371 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.753055 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.755123 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.759924 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.760025 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.760801 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.761199 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-6mwsd" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.761369 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.767139 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.874784 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-config\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.875969 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.876095 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc 
kubenswrapper[5032]: I0122 16:22:11.876176 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.876277 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.876371 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jhlc\" (UniqueName: \"kubernetes.io/projected/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-kube-api-access-5jhlc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.876466 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.876557 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978515 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jhlc\" (UniqueName: \"kubernetes.io/projected/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-kube-api-access-5jhlc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978596 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978630 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978680 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-config\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978785 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978808 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978833 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.978883 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.979277 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.979332 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.979819 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-config\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.980111 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.984604 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.984710 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:11 crc kubenswrapper[5032]: I0122 16:22:11.987404 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:12 crc kubenswrapper[5032]: I0122 16:22:12.002048 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:12 crc kubenswrapper[5032]: I0122 16:22:12.012458 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jhlc\" (UniqueName: \"kubernetes.io/projected/4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec-kube-api-access-5jhlc\") pod \"ovsdbserver-nb-0\" (UID: \"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec\") " pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:12 crc kubenswrapper[5032]: I0122 16:22:12.075290 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.649781 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.651334 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.653650 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.653647 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fdzxp" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.654050 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.654205 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.668374 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723017 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723066 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723089 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723118 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-config\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723189 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723221 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723246 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.723276 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrtkp\" (UniqueName: \"kubernetes.io/projected/277fb0f4-f6b8-4e75-91dc-9879c7781871-kube-api-access-xrtkp\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825169 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-config\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825223 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825308 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrtkp\" (UniqueName: \"kubernetes.io/projected/277fb0f4-f6b8-4e75-91dc-9879c7781871-kube-api-access-xrtkp\") pod 
\"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825368 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825391 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825408 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.825632 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.826028 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.826112 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-config\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.826522 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/277fb0f4-f6b8-4e75-91dc-9879c7781871-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.831345 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.838718 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.840297 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/277fb0f4-f6b8-4e75-91dc-9879c7781871-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.842827 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrtkp\" (UniqueName: \"kubernetes.io/projected/277fb0f4-f6b8-4e75-91dc-9879c7781871-kube-api-access-xrtkp\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.845547 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"277fb0f4-f6b8-4e75-91dc-9879c7781871\") " pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:14 crc kubenswrapper[5032]: I0122 16:22:14.968208 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:21 crc kubenswrapper[5032]: E0122 16:22:21.125791 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 16:22:21 crc kubenswrapper[5032]: E0122 16:22:21.126772 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n9h26,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-829k5_openstack(ac662245-f579-42a6-b40d-89640103cd3c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:22:21 crc kubenswrapper[5032]: E0122 16:22:21.128019 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" podUID="ac662245-f579-42a6-b40d-89640103cd3c" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.239422 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.240047 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-54ctz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-c55lx_openstack(9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.240784 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.240931 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dkc89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-g7w74_openstack(6cbba5f7-916d-4ba9-8c15-f84cd472f36d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.241901 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" podUID="9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.242921 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" podUID="6cbba5f7-916d-4ba9-8c15-f84cd472f36d" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.264643 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.264808 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-trqjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-zrvjp_openstack(ee318086-45e9-4f61-bc62-3ac4024e2635): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:22:22 crc kubenswrapper[5032]: E0122 16:22:22.266128 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.434995 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.481032 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config\") pod \"ac662245-f579-42a6-b40d-89640103cd3c\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.481187 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9h26\" (UniqueName: \"kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26\") pod \"ac662245-f579-42a6-b40d-89640103cd3c\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.481226 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc\") pod \"ac662245-f579-42a6-b40d-89640103cd3c\" (UID: \"ac662245-f579-42a6-b40d-89640103cd3c\") " Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.482150 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ac662245-f579-42a6-b40d-89640103cd3c" (UID: "ac662245-f579-42a6-b40d-89640103cd3c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.482733 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config" (OuterVolumeSpecName: "config") pod "ac662245-f579-42a6-b40d-89640103cd3c" (UID: "ac662245-f579-42a6-b40d-89640103cd3c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.490747 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26" (OuterVolumeSpecName: "kube-api-access-n9h26") pod "ac662245-f579-42a6-b40d-89640103cd3c" (UID: "ac662245-f579-42a6-b40d-89640103cd3c"). InnerVolumeSpecName "kube-api-access-n9h26". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.583599 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9h26\" (UniqueName: \"kubernetes.io/projected/ac662245-f579-42a6-b40d-89640103cd3c-kube-api-access-n9h26\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.583895 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.583904 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac662245-f579-42a6-b40d-89640103cd3c-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.702238 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.866415 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:22:22 crc kubenswrapper[5032]: W0122 16:22:22.872599 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87fc46da_d927_4dff_9459_e5671ee0b0c4.slice/crio-54c10a8583aad587e9f429897e9ac602f31b11c38134f25ef67bd7bf1467b3e5 WatchSource:0}: Error finding container 54c10a8583aad587e9f429897e9ac602f31b11c38134f25ef67bd7bf1467b3e5: Status 404 returned error can't find the container with id 54c10a8583aad587e9f429897e9ac602f31b11c38134f25ef67bd7bf1467b3e5 Jan 22 16:22:22 crc kubenswrapper[5032]: W0122 16:22:22.890841 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45c951d2_5c76_4a36_a53c_47559731095e.slice/crio-51eefcd367879b94d8c95c9557adb883ab46b7b69abbe0f5dcf89c27b4d0c82b WatchSource:0}: Error finding container 51eefcd367879b94d8c95c9557adb883ab46b7b69abbe0f5dcf89c27b4d0c82b: Status 404 returned error can't find the container with id 51eefcd367879b94d8c95c9557adb883ab46b7b69abbe0f5dcf89c27b4d0c82b Jan 22 16:22:22 crc kubenswrapper[5032]: I0122 16:22:22.892890 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.036845 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-7s9l4"] Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.048239 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8rk6c"] Jan 22 16:22:23 crc kubenswrapper[5032]: W0122 16:22:23.054304 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3124207a_b471_4d1b_a15f_ce4191494ba2.slice/crio-9d8a46aaf2e90970c8869dac4511fd7e21193224f27efd09e40cccb0ce1c8844 WatchSource:0}: Error finding container 9d8a46aaf2e90970c8869dac4511fd7e21193224f27efd09e40cccb0ce1c8844: Status 404 returned error can't find the container with id 9d8a46aaf2e90970c8869dac4511fd7e21193224f27efd09e40cccb0ce1c8844 Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.055331 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.093765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"8122c1e4-e2c9-4d65-bd43-be04477fd674","Type":"ContainerStarted","Data":"a391fec8a19c12b74717f2c440a19f8f31544963a5e50ed341b6759b1f1f8771"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.094677 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-7s9l4" event={"ID":"468c8792-5087-4ced-909b-e36976aa235a","Type":"ContainerStarted","Data":"a440adebbb410760a4fd50b7dba077785cffbc0fd643efdbbc2b6fe72eb22e3b"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.095824 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" event={"ID":"ac662245-f579-42a6-b40d-89640103cd3c","Type":"ContainerDied","Data":"25d656f9aa64a93f7d1154c268004970d2b353a90d5b9360ab16271733ebb4ec"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.095870 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-829k5" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.097027 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"87fc46da-d927-4dff-9459-e5671ee0b0c4","Type":"ContainerStarted","Data":"54c10a8583aad587e9f429897e9ac602f31b11c38134f25ef67bd7bf1467b3e5"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.098436 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c" event={"ID":"b60d899c-ce17-4f38-97ea-533ae4e56597","Type":"ContainerStarted","Data":"4cf8e790cd8bd12aefbc5db69df33681adce2eb5608c80f012afba36f7fa52cb"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.101302 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"45c951d2-5c76-4a36-a53c-47559731095e","Type":"ContainerStarted","Data":"51eefcd367879b94d8c95c9557adb883ab46b7b69abbe0f5dcf89c27b4d0c82b"} Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.102697 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3124207a-b471-4d1b-a15f-ce4191494ba2","Type":"ContainerStarted","Data":"9d8a46aaf2e90970c8869dac4511fd7e21193224f27efd09e40cccb0ce1c8844"} Jan 22 16:22:23 crc kubenswrapper[5032]: E0122 16:22:23.104298 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" Jan 22 16:22:23 crc kubenswrapper[5032]: E0122 16:22:23.105908 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" podUID="9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.144394 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 16:22:23 crc kubenswrapper[5032]: W0122 16:22:23.148097 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod277fb0f4_f6b8_4e75_91dc_9879c7781871.slice/crio-239d233e8ccc5252ca0fd287b80aba9009fb9ac58011e5741d286dcc8ae9c32c WatchSource:0}: Error finding container 239d233e8ccc5252ca0fd287b80aba9009fb9ac58011e5741d286dcc8ae9c32c: Status 
404 returned error can't find the container with id 239d233e8ccc5252ca0fd287b80aba9009fb9ac58011e5741d286dcc8ae9c32c Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.231501 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.237069 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-829k5"] Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.606925 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.804434 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config\") pod \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.804522 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkc89\" (UniqueName: \"kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89\") pod \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\" (UID: \"6cbba5f7-916d-4ba9-8c15-f84cd472f36d\") " Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.804945 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config" (OuterVolumeSpecName: "config") pod "6cbba5f7-916d-4ba9-8c15-f84cd472f36d" (UID: "6cbba5f7-916d-4ba9-8c15-f84cd472f36d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.811152 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89" (OuterVolumeSpecName: "kube-api-access-dkc89") pod "6cbba5f7-916d-4ba9-8c15-f84cd472f36d" (UID: "6cbba5f7-916d-4ba9-8c15-f84cd472f36d"). InnerVolumeSpecName "kube-api-access-dkc89". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.907081 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:23 crc kubenswrapper[5032]: I0122 16:22:23.907393 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkc89\" (UniqueName: \"kubernetes.io/projected/6cbba5f7-916d-4ba9-8c15-f84cd472f36d-kube-api-access-dkc89\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.096933 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.111613 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerStarted","Data":"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496"} Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.113220 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"277fb0f4-f6b8-4e75-91dc-9879c7781871","Type":"ContainerStarted","Data":"239d233e8ccc5252ca0fd287b80aba9009fb9ac58011e5741d286dcc8ae9c32c"} Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.114413 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" event={"ID":"6cbba5f7-916d-4ba9-8c15-f84cd472f36d","Type":"ContainerDied","Data":"fef669d6c26002d928f115c815876a7c17d6ad25c655696f3cfae3578940da4e"} Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.114481 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g7w74" Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.119094 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerStarted","Data":"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76"} Jan 22 16:22:24 crc kubenswrapper[5032]: W0122 16:22:24.132402 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d3ec4e9_0847_4d54_9d7a_e9455e8e85ec.slice/crio-60a0069be2f8990cd2052bb16e8de379fdb141d2d7245c08a0e3ed21c3a9816b WatchSource:0}: Error finding container 60a0069be2f8990cd2052bb16e8de379fdb141d2d7245c08a0e3ed21c3a9816b: Status 404 returned error can't find the container with id 60a0069be2f8990cd2052bb16e8de379fdb141d2d7245c08a0e3ed21c3a9816b Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.204520 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.208912 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g7w74"] Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.466983 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cbba5f7-916d-4ba9-8c15-f84cd472f36d" path="/var/lib/kubelet/pods/6cbba5f7-916d-4ba9-8c15-f84cd472f36d/volumes" Jan 22 16:22:24 crc kubenswrapper[5032]: I0122 16:22:24.467407 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac662245-f579-42a6-b40d-89640103cd3c" path="/var/lib/kubelet/pods/ac662245-f579-42a6-b40d-89640103cd3c/volumes" Jan 22 16:22:25 crc kubenswrapper[5032]: I0122 16:22:25.127342 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec","Type":"ContainerStarted","Data":"60a0069be2f8990cd2052bb16e8de379fdb141d2d7245c08a0e3ed21c3a9816b"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.162902 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"45c951d2-5c76-4a36-a53c-47559731095e","Type":"ContainerStarted","Data":"62446e1f60e8dbb0720e376d7d98990181f3c140483e0f87624a287ecf7d100e"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.164457 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3124207a-b471-4d1b-a15f-ce4191494ba2","Type":"ContainerStarted","Data":"317ab6125e0f229b59e0ba5ba46d03fb8933398e9e866de33973090f58822660"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.166027 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec","Type":"ContainerStarted","Data":"57adde6579f6ab1aaad6e64315190ea97f7bab8bc262dd20d5e41087a3c81ec2"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.167961 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8122c1e4-e2c9-4d65-bd43-be04477fd674","Type":"ContainerStarted","Data":"0b5103dc9473a131aaf936a28e2cf5506e290d85b3f2cd80741c6b395837da00"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.168170 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.170252 5032 generic.go:334] "Generic (PLEG): container finished" 
podID="468c8792-5087-4ced-909b-e36976aa235a" containerID="e2deb262c1c91ae3b8dbccac746445182527d8f11d73dba8e141dcb03ed8b49c" exitCode=0 Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.170372 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-7s9l4" event={"ID":"468c8792-5087-4ced-909b-e36976aa235a","Type":"ContainerDied","Data":"e2deb262c1c91ae3b8dbccac746445182527d8f11d73dba8e141dcb03ed8b49c"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.172271 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"87fc46da-d927-4dff-9459-e5671ee0b0c4","Type":"ContainerStarted","Data":"a2a12d2f91247f8366f1a3665e2845e4952deef887313c793579236078e3cb22"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.172392 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.173980 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c" event={"ID":"b60d899c-ce17-4f38-97ea-533ae4e56597","Type":"ContainerStarted","Data":"04404eef41fee3dc771c40407f106db359f06e1b73979360c0f642b85e86c401"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.174621 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-8rk6c" Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.175799 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"277fb0f4-f6b8-4e75-91dc-9879c7781871","Type":"ContainerStarted","Data":"4e062b673ed138c2fb366ba004a31b6c99bba8e8ddcf7866bc3fc4aebeefbafc"} Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.315435 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=19.463316408 podStartE2EDuration="25.31541715s" podCreationTimestamp="2026-01-22 16:22:05 +0000 UTC" firstStartedPulling="2026-01-22 16:22:22.708246497 +0000 UTC m=+1046.520415538" lastFinishedPulling="2026-01-22 16:22:28.560347209 +0000 UTC m=+1052.372516280" observedRunningTime="2026-01-22 16:22:30.293253627 +0000 UTC m=+1054.105422668" watchObservedRunningTime="2026-01-22 16:22:30.31541715 +0000 UTC m=+1054.127586171" Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.350602 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8rk6c" podStartSLOduration=14.118067709 podStartE2EDuration="20.35056673s" podCreationTimestamp="2026-01-22 16:22:10 +0000 UTC" firstStartedPulling="2026-01-22 16:22:23.053379484 +0000 UTC m=+1046.865548515" lastFinishedPulling="2026-01-22 16:22:29.285878505 +0000 UTC m=+1053.098047536" observedRunningTime="2026-01-22 16:22:30.31166437 +0000 UTC m=+1054.123833401" watchObservedRunningTime="2026-01-22 16:22:30.35056673 +0000 UTC m=+1054.162735751" Jan 22 16:22:30 crc kubenswrapper[5032]: I0122 16:22:30.396999 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=17.89519577 podStartE2EDuration="24.39696935s" podCreationTimestamp="2026-01-22 16:22:06 +0000 UTC" firstStartedPulling="2026-01-22 16:22:22.875141989 +0000 UTC m=+1046.687311020" lastFinishedPulling="2026-01-22 16:22:29.376915569 +0000 UTC m=+1053.189084600" observedRunningTime="2026-01-22 16:22:30.391008741 +0000 UTC m=+1054.203177782" watchObservedRunningTime="2026-01-22 16:22:30.39696935 +0000 UTC m=+1054.209138381" Jan 22 
16:22:31 crc kubenswrapper[5032]: I0122 16:22:31.188979 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-7s9l4" event={"ID":"468c8792-5087-4ced-909b-e36976aa235a","Type":"ContainerStarted","Data":"c9ab3877dcb635bbc49f960465d874b3fb9302febe1de87f9580621b0afa319e"} Jan 22 16:22:31 crc kubenswrapper[5032]: I0122 16:22:31.189262 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-7s9l4" event={"ID":"468c8792-5087-4ced-909b-e36976aa235a","Type":"ContainerStarted","Data":"a51f48b931c8885c36eb598be2339d3646ce53d3f02ce472f44313e1dae89438"} Jan 22 16:22:31 crc kubenswrapper[5032]: I0122 16:22:31.219031 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-7s9l4" podStartSLOduration=15.721619058 podStartE2EDuration="21.219014737s" podCreationTimestamp="2026-01-22 16:22:10 +0000 UTC" firstStartedPulling="2026-01-22 16:22:23.062668472 +0000 UTC m=+1046.874837503" lastFinishedPulling="2026-01-22 16:22:28.560064151 +0000 UTC m=+1052.372233182" observedRunningTime="2026-01-22 16:22:31.211909937 +0000 UTC m=+1055.024078978" watchObservedRunningTime="2026-01-22 16:22:31.219014737 +0000 UTC m=+1055.031183768" Jan 22 16:22:31 crc kubenswrapper[5032]: I0122 16:22:31.266597 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:31 crc kubenswrapper[5032]: I0122 16:22:31.266662 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:22:33 crc kubenswrapper[5032]: I0122 16:22:33.211121 5032 generic.go:334] "Generic (PLEG): container finished" podID="45c951d2-5c76-4a36-a53c-47559731095e" containerID="62446e1f60e8dbb0720e376d7d98990181f3c140483e0f87624a287ecf7d100e" exitCode=0 Jan 22 16:22:33 crc kubenswrapper[5032]: I0122 16:22:33.211218 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"45c951d2-5c76-4a36-a53c-47559731095e","Type":"ContainerDied","Data":"62446e1f60e8dbb0720e376d7d98990181f3c140483e0f87624a287ecf7d100e"} Jan 22 16:22:33 crc kubenswrapper[5032]: I0122 16:22:33.228087 5032 generic.go:334] "Generic (PLEG): container finished" podID="3124207a-b471-4d1b-a15f-ce4191494ba2" containerID="317ab6125e0f229b59e0ba5ba46d03fb8933398e9e866de33973090f58822660" exitCode=0 Jan 22 16:22:33 crc kubenswrapper[5032]: I0122 16:22:33.228206 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3124207a-b471-4d1b-a15f-ce4191494ba2","Type":"ContainerDied","Data":"317ab6125e0f229b59e0ba5ba46d03fb8933398e9e866de33973090f58822660"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.236565 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"277fb0f4-f6b8-4e75-91dc-9879c7781871","Type":"ContainerStarted","Data":"391b231da5a4d905228acb5770de09993f69ce9766afa51ac3976f9b1d39558b"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.238637 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"45c951d2-5c76-4a36-a53c-47559731095e","Type":"ContainerStarted","Data":"275c884e1f2f6e32cfdfcfcce195856b772fd21792afa9a0c6332e8f5f4f3ef5"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.242716 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"3124207a-b471-4d1b-a15f-ce4191494ba2","Type":"ContainerStarted","Data":"88cc52a7e17f2b7dcb9387294b6862b86370f0c2ab2550393713cee4162a1de6"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.245202 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec","Type":"ContainerStarted","Data":"85dc152814454af72954cc9526ef4648a21511ebd5338383a10808f3578c77ec"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.246736 5032 generic.go:334] "Generic (PLEG): container finished" podID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerID="1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325" exitCode=0 Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.246781 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" event={"ID":"ee318086-45e9-4f61-bc62-3ac4024e2635","Type":"ContainerDied","Data":"1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325"} Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.261056 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=11.150674409 podStartE2EDuration="21.261035273s" podCreationTimestamp="2026-01-22 16:22:13 +0000 UTC" firstStartedPulling="2026-01-22 16:22:23.150918531 +0000 UTC m=+1046.963087562" lastFinishedPulling="2026-01-22 16:22:33.261279395 +0000 UTC m=+1057.073448426" observedRunningTime="2026-01-22 16:22:34.253000398 +0000 UTC m=+1058.065169459" watchObservedRunningTime="2026-01-22 16:22:34.261035273 +0000 UTC m=+1058.073204324" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.307143 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=15.205667844 podStartE2EDuration="24.307117285s" podCreationTimestamp="2026-01-22 16:22:10 +0000 UTC" firstStartedPulling="2026-01-22 16:22:24.144851724 +0000 UTC m=+1047.957020745" lastFinishedPulling="2026-01-22 16:22:33.246301145 +0000 UTC m=+1057.058470186" observedRunningTime="2026-01-22 16:22:34.293179793 +0000 UTC m=+1058.105348834" watchObservedRunningTime="2026-01-22 16:22:34.307117285 +0000 UTC m=+1058.119286326" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.320204 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=25.925405784 podStartE2EDuration="32.320181274s" podCreationTimestamp="2026-01-22 16:22:02 +0000 UTC" firstStartedPulling="2026-01-22 16:22:22.894063434 +0000 UTC m=+1046.706232465" lastFinishedPulling="2026-01-22 16:22:29.288838924 +0000 UTC m=+1053.101007955" observedRunningTime="2026-01-22 16:22:34.317472842 +0000 UTC m=+1058.129641873" watchObservedRunningTime="2026-01-22 16:22:34.320181274 +0000 UTC m=+1058.132350305" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.356791 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-t96m6"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.358152 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.364999 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.379376 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=25.477107383 podStartE2EDuration="31.379355496s" podCreationTimestamp="2026-01-22 16:22:03 +0000 UTC" firstStartedPulling="2026-01-22 16:22:23.057773211 +0000 UTC m=+1046.869942242" lastFinishedPulling="2026-01-22 16:22:28.960021324 +0000 UTC m=+1052.772190355" observedRunningTime="2026-01-22 16:22:34.365408124 +0000 UTC m=+1058.177577175" watchObservedRunningTime="2026-01-22 16:22:34.379355496 +0000 UTC m=+1058.191524527" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.404492 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-t96m6"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483078 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x2jv\" (UniqueName: \"kubernetes.io/projected/14f8d6a2-ecc6-468e-82af-b035f56827a3-kube-api-access-8x2jv\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483157 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovs-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483305 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovn-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483417 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-combined-ca-bundle\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483461 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.483488 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14f8d6a2-ecc6-468e-82af-b035f56827a3-config\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.503439 5032 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.536115 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.537342 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.542444 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.548799 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.585965 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x2jv\" (UniqueName: \"kubernetes.io/projected/14f8d6a2-ecc6-468e-82af-b035f56827a3-kube-api-access-8x2jv\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586052 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovs-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586095 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovn-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586200 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-combined-ca-bundle\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586230 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586258 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14f8d6a2-ecc6-468e-82af-b035f56827a3-config\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.586559 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovn-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.587177 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14f8d6a2-ecc6-468e-82af-b035f56827a3-config\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.587938 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/14f8d6a2-ecc6-468e-82af-b035f56827a3-ovs-rundir\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.598646 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.599466 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14f8d6a2-ecc6-468e-82af-b035f56827a3-combined-ca-bundle\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.610436 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x2jv\" (UniqueName: \"kubernetes.io/projected/14f8d6a2-ecc6-468e-82af-b035f56827a3-kube-api-access-8x2jv\") pod \"ovn-controller-metrics-t96m6\" (UID: \"14f8d6a2-ecc6-468e-82af-b035f56827a3\") " pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.650422 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.687817 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.687907 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.688002 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktrqx\" (UniqueName: \"kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.688032 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: 
\"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.699669 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.700910 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.705110 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.708664 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-t96m6" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.717652 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789776 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789832 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktrqx\" (UniqueName: \"kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789877 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789908 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789965 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njd25\" (UniqueName: \"kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.789999 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.790031 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.790065 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.790087 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.790839 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.791397 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.791976 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.808784 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktrqx\" (UniqueName: \"kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx\") pod \"dnsmasq-dns-5bf47b49b7-f9m6z\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.874762 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.891087 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.891916 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.891969 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.892023 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njd25\" (UniqueName: \"kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.892053 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.892084 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.892967 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.893039 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.895271 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 
16:22:34.910959 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njd25\" (UniqueName: \"kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25\") pod \"dnsmasq-dns-8554648995-xlhtd\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.928762 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:34 crc kubenswrapper[5032]: I0122 16:22:34.968520 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.036215 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.095445 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc\") pod \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.095491 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54ctz\" (UniqueName: \"kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz\") pod \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.095564 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config\") pod \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\" (UID: \"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.096481 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config" (OuterVolumeSpecName: "config") pod "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" (UID: "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.096817 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" (UID: "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.101621 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz" (OuterVolumeSpecName: "kube-api-access-54ctz") pod "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" (UID: "9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2"). InnerVolumeSpecName "kube-api-access-54ctz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.203622 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.203690 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54ctz\" (UniqueName: \"kubernetes.io/projected/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-kube-api-access-54ctz\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.203705 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.204903 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.204992 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.240265 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-t96m6"] Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.258977 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" event={"ID":"9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2","Type":"ContainerDied","Data":"b214ff1a7946d73b19742e2196e45f0b009919f3405bf697f62e4ac1be99f1e7"} Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.259060 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-c55lx" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.263938 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" event={"ID":"ee318086-45e9-4f61-bc62-3ac4024e2635","Type":"ContainerStarted","Data":"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341"} Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.264740 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="dnsmasq-dns" containerID="cri-o://e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341" gracePeriod=10 Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.376094 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" podStartSLOduration=3.1543675589999998 podStartE2EDuration="35.376076763s" podCreationTimestamp="2026-01-22 16:22:00 +0000 UTC" firstStartedPulling="2026-01-22 16:22:01.779041741 +0000 UTC m=+1025.591210772" lastFinishedPulling="2026-01-22 16:22:34.000750875 +0000 UTC m=+1057.812919976" observedRunningTime="2026-01-22 16:22:35.282845921 +0000 UTC m=+1059.095014962" watchObservedRunningTime="2026-01-22 16:22:35.376076763 +0000 UTC m=+1059.188245794" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.377842 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.424133 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 22 16:22:35 crc kubenswrapper[5032]: W0122 16:22:35.434394 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd15a295c_73fb_4733_b057_dc42e023a51a.slice/crio-f609570ddebae20b9686ebab1acef6d54cca31942713d7d6d1c96dc0ad2f4c95 WatchSource:0}: Error finding container f609570ddebae20b9686ebab1acef6d54cca31942713d7d6d1c96dc0ad2f4c95: Status 404 returned error can't find the container with id f609570ddebae20b9686ebab1acef6d54cca31942713d7d6d1c96dc0ad2f4c95 Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.473532 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.602816 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.619028 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-c55lx"] Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.667932 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.815107 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config\") pod \"ee318086-45e9-4f61-bc62-3ac4024e2635\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.815561 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trqjj\" (UniqueName: \"kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj\") pod \"ee318086-45e9-4f61-bc62-3ac4024e2635\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.815701 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc\") pod \"ee318086-45e9-4f61-bc62-3ac4024e2635\" (UID: \"ee318086-45e9-4f61-bc62-3ac4024e2635\") " Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.819953 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj" (OuterVolumeSpecName: "kube-api-access-trqjj") pod "ee318086-45e9-4f61-bc62-3ac4024e2635" (UID: "ee318086-45e9-4f61-bc62-3ac4024e2635"). InnerVolumeSpecName "kube-api-access-trqjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.851176 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config" (OuterVolumeSpecName: "config") pod "ee318086-45e9-4f61-bc62-3ac4024e2635" (UID: "ee318086-45e9-4f61-bc62-3ac4024e2635"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.857755 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ee318086-45e9-4f61-bc62-3ac4024e2635" (UID: "ee318086-45e9-4f61-bc62-3ac4024e2635"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.917240 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trqjj\" (UniqueName: \"kubernetes.io/projected/ee318086-45e9-4f61-bc62-3ac4024e2635-kube-api-access-trqjj\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.917272 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.917281 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee318086-45e9-4f61-bc62-3ac4024e2635-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:35 crc kubenswrapper[5032]: I0122 16:22:35.968385 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.003833 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.076533 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.111945 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.275103 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-t96m6" event={"ID":"14f8d6a2-ecc6-468e-82af-b035f56827a3","Type":"ContainerStarted","Data":"280d82b3f58616c5c87d24ab5149395aaef74475222b5d1923e48114adff1bb4"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.275171 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-t96m6" event={"ID":"14f8d6a2-ecc6-468e-82af-b035f56827a3","Type":"ContainerStarted","Data":"d06818042cc34f0aeec0ecf4658b95c2ad14538f76158d946a9de993ec06e946"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.279468 5032 generic.go:334] "Generic (PLEG): container finished" podID="d15a295c-73fb-4733-b057-dc42e023a51a" containerID="d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf" exitCode=0 Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.279519 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" event={"ID":"d15a295c-73fb-4733-b057-dc42e023a51a","Type":"ContainerDied","Data":"d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.279558 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" event={"ID":"d15a295c-73fb-4733-b057-dc42e023a51a","Type":"ContainerStarted","Data":"f609570ddebae20b9686ebab1acef6d54cca31942713d7d6d1c96dc0ad2f4c95"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.283633 5032 generic.go:334] "Generic (PLEG): container finished" podID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerID="22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198" exitCode=0 Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.283765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-xlhtd" 
event={"ID":"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a","Type":"ContainerDied","Data":"22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.283846 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-xlhtd" event={"ID":"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a","Type":"ContainerStarted","Data":"3d615b97f6fa81fa9186bb2488c9f6c12456b50911d502a11025abf9ca8ffb24"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.285965 5032 generic.go:334] "Generic (PLEG): container finished" podID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerID="e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341" exitCode=0 Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.286042 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.286101 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" event={"ID":"ee318086-45e9-4f61-bc62-3ac4024e2635","Type":"ContainerDied","Data":"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.286143 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zrvjp" event={"ID":"ee318086-45e9-4f61-bc62-3ac4024e2635","Type":"ContainerDied","Data":"92fdf1fe1f570a1f114c5d85c6bb07a4b6dbad6561340846297e8e9139a10d36"} Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.286172 5032 scope.go:117] "RemoveContainer" containerID="e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.286441 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.310748 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-t96m6" podStartSLOduration=2.31072941 podStartE2EDuration="2.31072941s" podCreationTimestamp="2026-01-22 16:22:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:36.298538344 +0000 UTC m=+1060.110707455" watchObservedRunningTime="2026-01-22 16:22:36.31072941 +0000 UTC m=+1060.122898441" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.377157 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.406543 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.421122 5032 scope.go:117] "RemoveContainer" containerID="1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.486746 5032 scope.go:117] "RemoveContainer" containerID="e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341" Jan 22 16:22:36 crc kubenswrapper[5032]: E0122 16:22:36.488617 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341\": container with ID starting with e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341 not found: ID does not exist" 
containerID="e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.488670 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341"} err="failed to get container status \"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341\": rpc error: code = NotFound desc = could not find container \"e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341\": container with ID starting with e5b304fdf4f6f19eca3033f6c546c3da345d14c6a4f2d78aa339744752612341 not found: ID does not exist" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.488700 5032 scope.go:117] "RemoveContainer" containerID="1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325" Jan 22 16:22:36 crc kubenswrapper[5032]: E0122 16:22:36.490087 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325\": container with ID starting with 1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325 not found: ID does not exist" containerID="1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.490260 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325"} err="failed to get container status \"1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325\": rpc error: code = NotFound desc = could not find container \"1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325\": container with ID starting with 1ac3d03a35c060a8d616c9df582206eec03a9a092d2d2580907e81f1d3044325 not found: ID does not exist" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.506209 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2" path="/var/lib/kubelet/pods/9f6a7c4d-c124-4e5a-af25-cc0c5bfc9ef2/volumes" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.506928 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.507311 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zrvjp"] Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.852256 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 22 16:22:36 crc kubenswrapper[5032]: E0122 16:22:36.852580 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="init" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.852598 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="init" Jan 22 16:22:36 crc kubenswrapper[5032]: E0122 16:22:36.852610 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="dnsmasq-dns" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.852616 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="dnsmasq-dns" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.852766 5032 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" containerName="dnsmasq-dns" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.853532 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.857479 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.857697 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-fjbs9" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.858495 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.858650 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.867270 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948000 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948077 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-scripts\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948104 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948133 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-config\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948202 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zld2x\" (UniqueName: \"kubernetes.io/projected/e2012182-a0b0-4312-9235-08bf99c92aa8-kube-api-access-zld2x\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948237 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:36 crc kubenswrapper[5032]: I0122 16:22:36.948271 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.049220 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.049276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-scripts\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.049298 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.049319 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-config\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.049373 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zld2x\" (UniqueName: \"kubernetes.io/projected/e2012182-a0b0-4312-9235-08bf99c92aa8-kube-api-access-zld2x\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.050245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-config\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.050261 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2012182-a0b0-4312-9235-08bf99c92aa8-scripts\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.050437 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.050473 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.050832 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.053754 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.054121 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.065952 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2012182-a0b0-4312-9235-08bf99c92aa8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.068552 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zld2x\" (UniqueName: \"kubernetes.io/projected/e2012182-a0b0-4312-9235-08bf99c92aa8-kube-api-access-zld2x\") pod \"ovn-northd-0\" (UID: \"e2012182-a0b0-4312-9235-08bf99c92aa8\") " pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.180155 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.299205 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.452719 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.504637 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.505939 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.510932 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.564831 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.564939 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.564962 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.564983 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.565026 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnpnt\" (UniqueName: \"kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.665964 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.666020 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.666052 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.666081 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.666114 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnpnt\" (UniqueName: \"kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.666930 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.667100 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.667563 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.667686 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.684284 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnpnt\" (UniqueName: \"kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt\") pod \"dnsmasq-dns-b8fbc5445-sn4fm\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.828996 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:37 crc kubenswrapper[5032]: I0122 16:22:37.829522 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.296182 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.326096 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" event={"ID":"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65","Type":"ContainerStarted","Data":"8490cc383963c20e88655d2048c1601642fc98f9c2dabdb707161f951cc74e39"} Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.327275 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e2012182-a0b0-4312-9235-08bf99c92aa8","Type":"ContainerStarted","Data":"fcb6bc6027583bddc026923a1a450b985c96d17586693d7e1a88969472c9a78f"} Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.463588 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee318086-45e9-4f61-bc62-3ac4024e2635" path="/var/lib/kubelet/pods/ee318086-45e9-4f61-bc62-3ac4024e2635/volumes" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.486506 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.492398 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.494083 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.494292 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-p2d6k" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.496030 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.496150 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.508764 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.580172 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w255g\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-kube-api-access-w255g\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.580325 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.580381 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2f2285d-ead9-458a-a0ef-ed666b86e12f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc 
kubenswrapper[5032]: I0122 16:22:38.580459 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-cache\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.580560 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.580689 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-lock\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.682790 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.682837 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2f2285d-ead9-458a-a0ef-ed666b86e12f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.682882 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-cache\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.682920 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.682979 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-lock\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.683034 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w255g\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-kube-api-access-w255g\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: E0122 16:22:38.682976 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.683283 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: E0122 16:22:38.683288 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.683356 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-cache\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: E0122 16:22:38.683403 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:22:39.183380142 +0000 UTC m=+1062.995549183 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.683625 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2f2285d-ead9-458a-a0ef-ed666b86e12f-lock\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.688808 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2f2285d-ead9-458a-a0ef-ed666b86e12f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.698351 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w255g\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-kube-api-access-w255g\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:38 crc kubenswrapper[5032]: I0122 16:22:38.717452 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:39 crc kubenswrapper[5032]: I0122 16:22:39.192176 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:39 crc kubenswrapper[5032]: E0122 16:22:39.192393 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:39 crc kubenswrapper[5032]: E0122 16:22:39.192426 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:39 crc kubenswrapper[5032]: 
E0122 16:22:39.192499 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:22:40.192473811 +0000 UTC m=+1064.004642842 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:40 crc kubenswrapper[5032]: I0122 16:22:40.206218 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:40 crc kubenswrapper[5032]: E0122 16:22:40.206422 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:40 crc kubenswrapper[5032]: E0122 16:22:40.206730 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:40 crc kubenswrapper[5032]: E0122 16:22:40.206816 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:22:42.206789348 +0000 UTC m=+1066.018958409 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.243559 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:42 crc kubenswrapper[5032]: E0122 16:22:42.243752 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:42 crc kubenswrapper[5032]: E0122 16:22:42.244038 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:42 crc kubenswrapper[5032]: E0122 16:22:42.244109 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:22:46.244088195 +0000 UTC m=+1070.056257226 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.360987 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" event={"ID":"d15a295c-73fb-4733-b057-dc42e023a51a","Type":"ContainerStarted","Data":"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a"} Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.501441 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-v5spw"] Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.502579 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.505108 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.505187 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.505966 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.532295 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v5spw"] Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.555356 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.555898 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.556002 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.556225 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.556387 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " 
pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.556893 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz6rd\" (UniqueName: \"kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.556936 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658124 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658187 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658217 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658253 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658275 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658299 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz6rd\" (UniqueName: \"kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.658316 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 
16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.659143 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.659730 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.659749 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.664635 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.665007 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.667157 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.677259 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz6rd\" (UniqueName: \"kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd\") pod \"swift-ring-rebalance-v5spw\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:42 crc kubenswrapper[5032]: I0122 16:22:42.850359 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:22:43 crc kubenswrapper[5032]: I0122 16:22:43.365302 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v5spw"] Jan 22 16:22:43 crc kubenswrapper[5032]: I0122 16:22:43.919022 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 22 16:22:43 crc kubenswrapper[5032]: I0122 16:22:43.919403 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 22 16:22:44 crc kubenswrapper[5032]: I0122 16:22:44.389971 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v5spw" event={"ID":"14ae4f20-7067-4eec-b6da-d7e6dce57195","Type":"ContainerStarted","Data":"2fe6d0836b5684e9107b63b352e5390519b74b4c1e7518b3b09a84e7364dedbf"} Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.405256 5032 generic.go:334] "Generic (PLEG): container finished" podID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerID="0f128633b7c55bb85c4cda23560cd0b5bb37e5fdedb65e7f9963623ce64a672f" exitCode=0 Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.405818 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" event={"ID":"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65","Type":"ContainerDied","Data":"0f128633b7c55bb85c4cda23560cd0b5bb37e5fdedb65e7f9963623ce64a672f"} Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.417485 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="dnsmasq-dns" containerID="cri-o://c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a" gracePeriod=10 Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.417573 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-xlhtd" event={"ID":"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a","Type":"ContainerStarted","Data":"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477"} Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.417624 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.417824 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.456956 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" podStartSLOduration=11.456937108 podStartE2EDuration="11.456937108s" podCreationTimestamp="2026-01-22 16:22:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:45.45217407 +0000 UTC m=+1069.264343091" watchObservedRunningTime="2026-01-22 16:22:45.456937108 +0000 UTC m=+1069.269106139" Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.520716 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-xlhtd" podStartSLOduration=11.520692352 podStartE2EDuration="11.520692352s" podCreationTimestamp="2026-01-22 16:22:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:45.509200515 +0000 UTC m=+1069.321369546" 
watchObservedRunningTime="2026-01-22 16:22:45.520692352 +0000 UTC m=+1069.332861383" Jan 22 16:22:45 crc kubenswrapper[5032]: I0122 16:22:45.966392 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.032213 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc\") pod \"d15a295c-73fb-4733-b057-dc42e023a51a\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.084198 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d15a295c-73fb-4733-b057-dc42e023a51a" (UID: "d15a295c-73fb-4733-b057-dc42e023a51a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.133226 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb\") pod \"d15a295c-73fb-4733-b057-dc42e023a51a\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.133280 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktrqx\" (UniqueName: \"kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx\") pod \"d15a295c-73fb-4733-b057-dc42e023a51a\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.133326 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config\") pod \"d15a295c-73fb-4733-b057-dc42e023a51a\" (UID: \"d15a295c-73fb-4733-b057-dc42e023a51a\") " Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.133562 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.137380 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx" (OuterVolumeSpecName: "kube-api-access-ktrqx") pod "d15a295c-73fb-4733-b057-dc42e023a51a" (UID: "d15a295c-73fb-4733-b057-dc42e023a51a"). InnerVolumeSpecName "kube-api-access-ktrqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.172742 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config" (OuterVolumeSpecName: "config") pod "d15a295c-73fb-4733-b057-dc42e023a51a" (UID: "d15a295c-73fb-4733-b057-dc42e023a51a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.173817 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d15a295c-73fb-4733-b057-dc42e023a51a" (UID: "d15a295c-73fb-4733-b057-dc42e023a51a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.235274 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.235324 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktrqx\" (UniqueName: \"kubernetes.io/projected/d15a295c-73fb-4733-b057-dc42e023a51a-kube-api-access-ktrqx\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.235343 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d15a295c-73fb-4733-b057-dc42e023a51a-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.336603 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:46 crc kubenswrapper[5032]: E0122 16:22:46.336845 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:46 crc kubenswrapper[5032]: E0122 16:22:46.336892 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:46 crc kubenswrapper[5032]: E0122 16:22:46.336948 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:22:54.336927433 +0000 UTC m=+1078.149096464 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.436885 5032 generic.go:334] "Generic (PLEG): container finished" podID="d15a295c-73fb-4733-b057-dc42e023a51a" containerID="c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a" exitCode=0 Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.436999 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" event={"ID":"d15a295c-73fb-4733-b057-dc42e023a51a","Type":"ContainerDied","Data":"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a"} Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.437036 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" event={"ID":"d15a295c-73fb-4733-b057-dc42e023a51a","Type":"ContainerDied","Data":"f609570ddebae20b9686ebab1acef6d54cca31942713d7d6d1c96dc0ad2f4c95"} Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.437112 5032 scope.go:117] "RemoveContainer" containerID="c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.437413 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-f9m6z" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.440502 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" event={"ID":"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65","Type":"ContainerStarted","Data":"39029843a03a5b460a9849cea93e4b9551eddd90c34385524acbae60411c7bdf"} Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.440970 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.449331 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e2012182-a0b0-4312-9235-08bf99c92aa8","Type":"ContainerStarted","Data":"4f3af12c5419e7665b69f494786fc493117a6bc01e61a549d54dd073a50517c8"} Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.449388 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e2012182-a0b0-4312-9235-08bf99c92aa8","Type":"ContainerStarted","Data":"b1b54cd7288ff2435e782c6fc902fe20af987a8d57ffe91de925c381822461b5"} Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.449447 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.467170 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" podStartSLOduration=9.467154554 podStartE2EDuration="9.467154554s" podCreationTimestamp="2026-01-22 16:22:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:46.464073272 +0000 UTC m=+1070.276242313" watchObservedRunningTime="2026-01-22 16:22:46.467154554 +0000 UTC m=+1070.279323585" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.521905 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.132811386 podStartE2EDuration="10.521887818s" 
podCreationTimestamp="2026-01-22 16:22:36 +0000 UTC" firstStartedPulling="2026-01-22 16:22:37.849732984 +0000 UTC m=+1061.661902015" lastFinishedPulling="2026-01-22 16:22:45.238809426 +0000 UTC m=+1069.050978447" observedRunningTime="2026-01-22 16:22:46.518377114 +0000 UTC m=+1070.330546145" watchObservedRunningTime="2026-01-22 16:22:46.521887818 +0000 UTC m=+1070.334056849" Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.535102 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:46 crc kubenswrapper[5032]: I0122 16:22:46.541303 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-f9m6z"] Jan 22 16:22:47 crc kubenswrapper[5032]: E0122 16:22:47.064789 5032 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.145:33992->38.102.83.145:40459: write tcp 38.102.83.145:33992->38.102.83.145:40459: write: broken pipe Jan 22 16:22:47 crc kubenswrapper[5032]: I0122 16:22:47.233015 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 22 16:22:47 crc kubenswrapper[5032]: I0122 16:22:47.320452 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 22 16:22:48 crc kubenswrapper[5032]: I0122 16:22:48.467222 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" path="/var/lib/kubelet/pods/d15a295c-73fb-4733-b057-dc42e023a51a/volumes" Jan 22 16:22:50 crc kubenswrapper[5032]: I0122 16:22:50.043146 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.554183 5032 scope.go:117] "RemoveContainer" containerID="d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.580659 5032 scope.go:117] "RemoveContainer" containerID="c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a" Jan 22 16:22:52 crc kubenswrapper[5032]: E0122 16:22:52.581222 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a\": container with ID starting with c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a not found: ID does not exist" containerID="c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.581266 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a"} err="failed to get container status \"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a\": rpc error: code = NotFound desc = could not find container \"c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a\": container with ID starting with c4e87d5ae8debcc5bdcd3ee92b06428025f6234f19801b764ea86adec2e4f67a not found: ID does not exist" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.581304 5032 scope.go:117] "RemoveContainer" containerID="d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf" Jan 22 16:22:52 crc kubenswrapper[5032]: E0122 16:22:52.584082 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf\": container with ID starting with d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf not found: ID does not exist" containerID="d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.584114 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf"} err="failed to get container status \"d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf\": rpc error: code = NotFound desc = could not find container \"d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf\": container with ID starting with d31fe1ad17c1364a79cdd2be125fa561178b81e98775cf32c62ff9f1ebc89cbf not found: ID does not exist" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.655570 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kpmvg"] Jan 22 16:22:52 crc kubenswrapper[5032]: E0122 16:22:52.656081 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="dnsmasq-dns" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.656103 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="dnsmasq-dns" Jan 22 16:22:52 crc kubenswrapper[5032]: E0122 16:22:52.656145 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="init" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.656153 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="init" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.656377 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d15a295c-73fb-4733-b057-dc42e023a51a" containerName="dnsmasq-dns" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.657130 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.663833 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.679573 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kpmvg"] Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.777325 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.777414 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz8tp\" (UniqueName: \"kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.844239 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.879198 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz8tp\" (UniqueName: \"kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.879405 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.880304 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.907750 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.908238 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-xlhtd" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="dnsmasq-dns" containerID="cri-o://506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477" gracePeriod=10 Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.918136 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz8tp\" (UniqueName: \"kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp\") pod \"root-account-create-update-kpmvg\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " 
pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:52 crc kubenswrapper[5032]: I0122 16:22:52.985518 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.470886 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.508802 5032 generic.go:334] "Generic (PLEG): container finished" podID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerID="506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477" exitCode=0 Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.508849 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-xlhtd" event={"ID":"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a","Type":"ContainerDied","Data":"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477"} Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.508925 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-xlhtd" event={"ID":"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a","Type":"ContainerDied","Data":"3d615b97f6fa81fa9186bb2488c9f6c12456b50911d502a11025abf9ca8ffb24"} Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.508946 5032 scope.go:117] "RemoveContainer" containerID="506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.509032 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-xlhtd" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.533135 5032 scope.go:117] "RemoveContainer" containerID="22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.559051 5032 scope.go:117] "RemoveContainer" containerID="506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477" Jan 22 16:22:53 crc kubenswrapper[5032]: E0122 16:22:53.559907 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477\": container with ID starting with 506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477 not found: ID does not exist" containerID="506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.559948 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477"} err="failed to get container status \"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477\": rpc error: code = NotFound desc = could not find container \"506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477\": container with ID starting with 506f49970ea190f41ecb545500549813b5e5f811e31e7ecc0f4f94eb7df0f477 not found: ID does not exist" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.560002 5032 scope.go:117] "RemoveContainer" containerID="22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198" Jan 22 16:22:53 crc kubenswrapper[5032]: E0122 16:22:53.560268 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198\": container with ID 
starting with 22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198 not found: ID does not exist" containerID="22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.560815 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198"} err="failed to get container status \"22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198\": rpc error: code = NotFound desc = could not find container \"22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198\": container with ID starting with 22d3d3ddd353558e53da8446bc874b4837e4e4d6420197be35ea71d9d38b6198 not found: ID does not exist" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.592536 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb\") pod \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.592600 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc\") pod \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.592629 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njd25\" (UniqueName: \"kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25\") pod \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.592664 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb\") pod \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.592702 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config\") pod \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\" (UID: \"17d9cf0e-c4cb-4066-b647-f3eb9d06d26a\") " Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.602314 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25" (OuterVolumeSpecName: "kube-api-access-njd25") pod "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" (UID: "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a"). InnerVolumeSpecName "kube-api-access-njd25". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.642945 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" (UID: "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.674456 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" (UID: "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.677803 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" (UID: "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.695473 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config" (OuterVolumeSpecName: "config") pod "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" (UID: "17d9cf0e-c4cb-4066-b647-f3eb9d06d26a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.696012 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.696051 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njd25\" (UniqueName: \"kubernetes.io/projected/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-kube-api-access-njd25\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.696068 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.696079 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.696089 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.723892 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kpmvg"] Jan 22 16:22:53 crc kubenswrapper[5032]: W0122 16:22:53.727729 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9dfcd49a_aa2e_43d7_bec5_f34aaf361118.slice/crio-9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3 WatchSource:0}: Error finding container 9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3: Status 404 returned error can't find the container with id 9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3 Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.746312 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 22 
16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.826334 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="3124207a-b471-4d1b-a15f-ce4191494ba2" containerName="galera" probeResult="failure" output=< Jan 22 16:22:53 crc kubenswrapper[5032]: wsrep_local_state_comment (Joined) differs from Synced Jan 22 16:22:53 crc kubenswrapper[5032]: > Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.841388 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:53 crc kubenswrapper[5032]: I0122 16:22:53.848785 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-xlhtd"] Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.408173 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:22:54 crc kubenswrapper[5032]: E0122 16:22:54.408425 5032 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 16:22:54 crc kubenswrapper[5032]: E0122 16:22:54.408457 5032 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 16:22:54 crc kubenswrapper[5032]: E0122 16:22:54.408537 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift podName:a2f2285d-ead9-458a-a0ef-ed666b86e12f nodeName:}" failed. No retries permitted until 2026-01-22 16:23:10.408511321 +0000 UTC m=+1094.220680352 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift") pod "swift-storage-0" (UID: "a2f2285d-ead9-458a-a0ef-ed666b86e12f") : configmap "swift-ring-files" not found Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.467877 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" path="/var/lib/kubelet/pods/17d9cf0e-c4cb-4066-b647-f3eb9d06d26a/volumes" Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.519001 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v5spw" event={"ID":"14ae4f20-7067-4eec-b6da-d7e6dce57195","Type":"ContainerStarted","Data":"4673fc46b9d9ee219afe6f767ed98fc09f9819f92974db21cf21ece783b2879a"} Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.520819 5032 generic.go:334] "Generic (PLEG): container finished" podID="9dfcd49a-aa2e-43d7-bec5-f34aaf361118" containerID="760773ef52722ec7e122618fd34ccd81d4622c147410b76d3aa1bf3524eaebb0" exitCode=0 Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.520887 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kpmvg" event={"ID":"9dfcd49a-aa2e-43d7-bec5-f34aaf361118","Type":"ContainerDied","Data":"760773ef52722ec7e122618fd34ccd81d4622c147410b76d3aa1bf3524eaebb0"} Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.520908 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kpmvg" event={"ID":"9dfcd49a-aa2e-43d7-bec5-f34aaf361118","Type":"ContainerStarted","Data":"9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3"} Jan 22 16:22:54 crc kubenswrapper[5032]: I0122 16:22:54.540419 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-v5spw" podStartSLOduration=2.856456264 podStartE2EDuration="12.540400757s" podCreationTimestamp="2026-01-22 16:22:42 +0000 UTC" firstStartedPulling="2026-01-22 16:22:43.378028579 +0000 UTC m=+1067.190197610" lastFinishedPulling="2026-01-22 16:22:53.061973072 +0000 UTC m=+1076.874142103" observedRunningTime="2026-01-22 16:22:54.533474301 +0000 UTC m=+1078.345643352" watchObservedRunningTime="2026-01-22 16:22:54.540400757 +0000 UTC m=+1078.352569788" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.001150 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-c2x7n"] Jan 22 16:22:55 crc kubenswrapper[5032]: E0122 16:22:55.001586 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="dnsmasq-dns" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.001604 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="dnsmasq-dns" Jan 22 16:22:55 crc kubenswrapper[5032]: E0122 16:22:55.001644 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="init" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.001653 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="init" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.001845 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="17d9cf0e-c4cb-4066-b647-f3eb9d06d26a" containerName="dnsmasq-dns" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.002477 5032 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.027689 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c2x7n"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.118400 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8b2d-account-create-update-jldnb"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.120219 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.121249 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.121457 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzjvf\" (UniqueName: \"kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.123076 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.127975 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8b2d-account-create-update-jldnb"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.222617 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzjvf\" (UniqueName: \"kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.222703 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.222762 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.222823 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xdqt\" (UniqueName: \"kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.223557 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.248399 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzjvf\" (UniqueName: \"kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf\") pod \"keystone-db-create-c2x7n\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.293478 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.315041 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-nsk8m"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.316521 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.326015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xdqt\" (UniqueName: \"kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.326196 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.327093 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.345047 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.358669 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xdqt\" (UniqueName: \"kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt\") pod \"keystone-8b2d-account-create-update-jldnb\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.376972 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nsk8m"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.431254 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.431484 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbpg8\" (UniqueName: \"kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.439540 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.440504 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-fcb7-account-create-update-xphbq"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.442067 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.448348 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.448600 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-fcb7-account-create-update-xphbq"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.533615 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.534835 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.534933 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86q2m\" (UniqueName: \"kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.535024 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.535110 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbpg8\" (UniqueName: \"kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.563685 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbpg8\" (UniqueName: \"kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8\") pod \"placement-db-create-nsk8m\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.624232 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-k8lnb"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.625243 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.633705 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-k8lnb"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.636121 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.636157 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86q2m\" (UniqueName: \"kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.636785 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.657082 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86q2m\" (UniqueName: \"kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m\") pod \"placement-fcb7-account-create-update-xphbq\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.721897 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-96eb-account-create-update-cms95"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.722958 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.725162 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.738998 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-96eb-account-create-update-cms95"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.739493 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.740137 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.740242 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smrpj\" (UniqueName: \"kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.791693 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.848333 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smrpj\" (UniqueName: \"kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.849127 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr7l4\" (UniqueName: \"kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.850871 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.851145 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.852497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.868772 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smrpj\" (UniqueName: \"kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj\") pod \"glance-db-create-k8lnb\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.906647 5032 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c2x7n"] Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.953128 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr7l4\" (UniqueName: \"kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.953192 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.956185 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:55 crc kubenswrapper[5032]: I0122 16:22:55.977789 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr7l4\" (UniqueName: \"kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4\") pod \"glance-96eb-account-create-update-cms95\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.005424 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.024650 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8b2d-account-create-update-jldnb"] Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.058776 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.155700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts\") pod \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.156025 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pz8tp\" (UniqueName: \"kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp\") pod \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\" (UID: \"9dfcd49a-aa2e-43d7-bec5-f34aaf361118\") " Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.156566 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dfcd49a-aa2e-43d7-bec5-f34aaf361118" (UID: "9dfcd49a-aa2e-43d7-bec5-f34aaf361118"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.156711 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.160810 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.163941 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp" (OuterVolumeSpecName: "kube-api-access-pz8tp") pod "9dfcd49a-aa2e-43d7-bec5-f34aaf361118" (UID: "9dfcd49a-aa2e-43d7-bec5-f34aaf361118"). InnerVolumeSpecName "kube-api-access-pz8tp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.257836 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pz8tp\" (UniqueName: \"kubernetes.io/projected/9dfcd49a-aa2e-43d7-bec5-f34aaf361118-kube-api-access-pz8tp\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.372374 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nsk8m"] Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.383249 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-k8lnb"] Jan 22 16:22:56 crc kubenswrapper[5032]: W0122 16:22:56.383398 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod926e4c07_ca1a_4f2f_9027_2e92f46eed39.slice/crio-d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da WatchSource:0}: Error finding container d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da: Status 404 returned error can't find the container with id d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.506937 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-fcb7-account-create-update-xphbq"] Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.519580 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.582646 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c2x7n" event={"ID":"e17a9ac9-bf21-4ea7-8801-8108d36d4aae","Type":"ContainerStarted","Data":"2da6cfe59fed5b6d6a5385f7fde3732110ed34469232d3adbab7978cd6163ed6"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.582702 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c2x7n" event={"ID":"e17a9ac9-bf21-4ea7-8801-8108d36d4aae","Type":"ContainerStarted","Data":"25b6b242c5e85fa5196d48621acb061efe076d21b8ae02d3b6503301b1a6ca5d"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.619766 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-c2x7n" podStartSLOduration=2.619745247 podStartE2EDuration="2.619745247s" podCreationTimestamp="2026-01-22 16:22:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:56.608577558 +0000 UTC 
m=+1080.420746589" watchObservedRunningTime="2026-01-22 16:22:56.619745247 +0000 UTC m=+1080.431914278" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.646547 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8lnb" event={"ID":"91fbfb7d-ff4b-4d4e-9faf-dff333949712","Type":"ContainerStarted","Data":"37e5d213c97c6544ecd076b2066cd1408f2cbbae572857e954de116e355dc86a"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.650695 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kpmvg" event={"ID":"9dfcd49a-aa2e-43d7-bec5-f34aaf361118","Type":"ContainerDied","Data":"9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.650726 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e26fdabb7275eac860c701389635de2503e70716deea6470c49b2851fc780a3" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.650795 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kpmvg" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.652635 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fcb7-account-create-update-xphbq" event={"ID":"981bbcc4-7806-4b54-956f-e686bbf0c7f1","Type":"ContainerStarted","Data":"b30f5cf5052c869f00dc9a4e4b0c676f8c61253fcb6042634b6ca18232abd61e"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.654298 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8b2d-account-create-update-jldnb" event={"ID":"48dd1fea-2fab-4318-81f4-38c3a21eba22","Type":"ContainerStarted","Data":"969b14fa1ad50b1be028371d8276f66999fd32c47906c108fa16300e2ffb028d"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.654367 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8b2d-account-create-update-jldnb" event={"ID":"48dd1fea-2fab-4318-81f4-38c3a21eba22","Type":"ContainerStarted","Data":"2f508e06c0347b2b50384d118f32ea165b6465cd95c45a2117dd7d76dc1d23a1"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.661220 5032 generic.go:334] "Generic (PLEG): container finished" podID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerID="4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76" exitCode=0 Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.661274 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerDied","Data":"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.664840 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nsk8m" event={"ID":"926e4c07-ca1a-4f2f-9027-2e92f46eed39","Type":"ContainerStarted","Data":"d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.670423 5032 generic.go:334] "Generic (PLEG): container finished" podID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerID="8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496" exitCode=0 Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.670470 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerDied","Data":"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496"} Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.716363 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8b2d-account-create-update-jldnb" podStartSLOduration=1.7163429890000002 podStartE2EDuration="1.716342989s" podCreationTimestamp="2026-01-22 16:22:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:56.680143582 +0000 UTC m=+1080.492312623" watchObservedRunningTime="2026-01-22 16:22:56.716342989 +0000 UTC m=+1080.528512020" Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.720293 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-96eb-account-create-update-cms95"] Jan 22 16:22:56 crc kubenswrapper[5032]: I0122 16:22:56.734994 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.252929 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.696151 5032 generic.go:334] "Generic (PLEG): container finished" podID="e17a9ac9-bf21-4ea7-8801-8108d36d4aae" containerID="2da6cfe59fed5b6d6a5385f7fde3732110ed34469232d3adbab7978cd6163ed6" exitCode=0 Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.696245 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c2x7n" event={"ID":"e17a9ac9-bf21-4ea7-8801-8108d36d4aae","Type":"ContainerDied","Data":"2da6cfe59fed5b6d6a5385f7fde3732110ed34469232d3adbab7978cd6163ed6"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.698208 5032 generic.go:334] "Generic (PLEG): container finished" podID="91fbfb7d-ff4b-4d4e-9faf-dff333949712" containerID="10ea5ea2e8f110d10708d62edec99683c4190ea064d5681acc8161135c8b7f9a" exitCode=0 Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.698285 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8lnb" event={"ID":"91fbfb7d-ff4b-4d4e-9faf-dff333949712","Type":"ContainerDied","Data":"10ea5ea2e8f110d10708d62edec99683c4190ea064d5681acc8161135c8b7f9a"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.700154 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fcb7-account-create-update-xphbq" event={"ID":"981bbcc4-7806-4b54-956f-e686bbf0c7f1","Type":"ContainerStarted","Data":"59c1ad5ed1f15ac53a2c3f12d4fc978baac2ff3cd7f1329e79f963195fa4b69e"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.702759 5032 generic.go:334] "Generic (PLEG): container finished" podID="48dd1fea-2fab-4318-81f4-38c3a21eba22" containerID="969b14fa1ad50b1be028371d8276f66999fd32c47906c108fa16300e2ffb028d" exitCode=0 Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.702813 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8b2d-account-create-update-jldnb" event={"ID":"48dd1fea-2fab-4318-81f4-38c3a21eba22","Type":"ContainerDied","Data":"969b14fa1ad50b1be028371d8276f66999fd32c47906c108fa16300e2ffb028d"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.706344 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerStarted","Data":"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.706643 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.708305 5032 generic.go:334] "Generic (PLEG): container finished" podID="926e4c07-ca1a-4f2f-9027-2e92f46eed39" containerID="faabb8e0a26b6d47c4c7c74e794d371a4bc189e9f7aa1e4d93748b2f2fa5a517" exitCode=0 Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.708421 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nsk8m" event={"ID":"926e4c07-ca1a-4f2f-9027-2e92f46eed39","Type":"ContainerDied","Data":"faabb8e0a26b6d47c4c7c74e794d371a4bc189e9f7aa1e4d93748b2f2fa5a517"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.710204 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-96eb-account-create-update-cms95" event={"ID":"64ab4105-77f7-4ee0-95a9-9e8a08acd357","Type":"ContainerStarted","Data":"b3e4430ebef0d9a3446951916cb63c0c4c9b41882e1356df3049a1da004b1cc4"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.710315 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-96eb-account-create-update-cms95" event={"ID":"64ab4105-77f7-4ee0-95a9-9e8a08acd357","Type":"ContainerStarted","Data":"a0b1cb1d6f76b6c72d74a4b38ccbcc79ce9da6eae5f4704d498208bcde23228b"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.714520 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerStarted","Data":"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0"} Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.715381 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.782776 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.320065635 podStartE2EDuration="57.782749638s" podCreationTimestamp="2026-01-22 16:22:00 +0000 UTC" firstStartedPulling="2026-01-22 16:22:02.918930101 +0000 UTC m=+1026.731099132" lastFinishedPulling="2026-01-22 16:22:22.381614104 +0000 UTC m=+1046.193783135" observedRunningTime="2026-01-22 16:22:57.770534432 +0000 UTC m=+1081.582703483" watchObservedRunningTime="2026-01-22 16:22:57.782749638 +0000 UTC m=+1081.594918689" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.786213 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-fcb7-account-create-update-xphbq" podStartSLOduration=2.786197141 podStartE2EDuration="2.786197141s" podCreationTimestamp="2026-01-22 16:22:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:57.783772706 +0000 UTC m=+1081.595941727" watchObservedRunningTime="2026-01-22 16:22:57.786197141 +0000 UTC m=+1081.598366182" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.831311 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.560901551 podStartE2EDuration="56.831293596s" podCreationTimestamp="2026-01-22 16:22:01 +0000 UTC" firstStartedPulling="2026-01-22 
16:22:03.093721912 +0000 UTC m=+1026.905890943" lastFinishedPulling="2026-01-22 16:22:22.364113957 +0000 UTC m=+1046.176282988" observedRunningTime="2026-01-22 16:22:57.825871221 +0000 UTC m=+1081.638040272" watchObservedRunningTime="2026-01-22 16:22:57.831293596 +0000 UTC m=+1081.643462627" Jan 22 16:22:57 crc kubenswrapper[5032]: I0122 16:22:57.869836 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-96eb-account-create-update-cms95" podStartSLOduration=2.869818476 podStartE2EDuration="2.869818476s" podCreationTimestamp="2026-01-22 16:22:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:22:57.86359912 +0000 UTC m=+1081.675768151" watchObservedRunningTime="2026-01-22 16:22:57.869818476 +0000 UTC m=+1081.681987507" Jan 22 16:22:58 crc kubenswrapper[5032]: I0122 16:22:58.725145 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fcb7-account-create-update-xphbq" event={"ID":"981bbcc4-7806-4b54-956f-e686bbf0c7f1","Type":"ContainerDied","Data":"59c1ad5ed1f15ac53a2c3f12d4fc978baac2ff3cd7f1329e79f963195fa4b69e"} Jan 22 16:22:58 crc kubenswrapper[5032]: I0122 16:22:58.724960 5032 generic.go:334] "Generic (PLEG): container finished" podID="981bbcc4-7806-4b54-956f-e686bbf0c7f1" containerID="59c1ad5ed1f15ac53a2c3f12d4fc978baac2ff3cd7f1329e79f963195fa4b69e" exitCode=0 Jan 22 16:22:58 crc kubenswrapper[5032]: I0122 16:22:58.728002 5032 generic.go:334] "Generic (PLEG): container finished" podID="64ab4105-77f7-4ee0-95a9-9e8a08acd357" containerID="b3e4430ebef0d9a3446951916cb63c0c4c9b41882e1356df3049a1da004b1cc4" exitCode=0 Jan 22 16:22:58 crc kubenswrapper[5032]: I0122 16:22:58.728155 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-96eb-account-create-update-cms95" event={"ID":"64ab4105-77f7-4ee0-95a9-9e8a08acd357","Type":"ContainerDied","Data":"b3e4430ebef0d9a3446951916cb63c0c4c9b41882e1356df3049a1da004b1cc4"} Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.247689 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.249335 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.283377 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.294815 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339451 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts\") pod \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339533 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzjvf\" (UniqueName: \"kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf\") pod \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\" (UID: \"e17a9ac9-bf21-4ea7-8801-8108d36d4aae\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339595 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts\") pod \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339612 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smrpj\" (UniqueName: \"kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj\") pod \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\" (UID: \"91fbfb7d-ff4b-4d4e-9faf-dff333949712\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339683 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts\") pod \"48dd1fea-2fab-4318-81f4-38c3a21eba22\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339709 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xdqt\" (UniqueName: \"kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt\") pod \"48dd1fea-2fab-4318-81f4-38c3a21eba22\" (UID: \"48dd1fea-2fab-4318-81f4-38c3a21eba22\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339781 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts\") pod \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.339798 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbpg8\" (UniqueName: \"kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8\") pod \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\" (UID: \"926e4c07-ca1a-4f2f-9027-2e92f46eed39\") " Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.343268 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "926e4c07-ca1a-4f2f-9027-2e92f46eed39" (UID: "926e4c07-ca1a-4f2f-9027-2e92f46eed39"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.343553 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "48dd1fea-2fab-4318-81f4-38c3a21eba22" (UID: "48dd1fea-2fab-4318-81f4-38c3a21eba22"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.343764 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e17a9ac9-bf21-4ea7-8801-8108d36d4aae" (UID: "e17a9ac9-bf21-4ea7-8801-8108d36d4aae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.344293 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "91fbfb7d-ff4b-4d4e-9faf-dff333949712" (UID: "91fbfb7d-ff4b-4d4e-9faf-dff333949712"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.346112 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8" (OuterVolumeSpecName: "kube-api-access-nbpg8") pod "926e4c07-ca1a-4f2f-9027-2e92f46eed39" (UID: "926e4c07-ca1a-4f2f-9027-2e92f46eed39"). InnerVolumeSpecName "kube-api-access-nbpg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.346197 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf" (OuterVolumeSpecName: "kube-api-access-pzjvf") pod "e17a9ac9-bf21-4ea7-8801-8108d36d4aae" (UID: "e17a9ac9-bf21-4ea7-8801-8108d36d4aae"). InnerVolumeSpecName "kube-api-access-pzjvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.346493 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt" (OuterVolumeSpecName: "kube-api-access-5xdqt") pod "48dd1fea-2fab-4318-81f4-38c3a21eba22" (UID: "48dd1fea-2fab-4318-81f4-38c3a21eba22"). InnerVolumeSpecName "kube-api-access-5xdqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.347045 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj" (OuterVolumeSpecName: "kube-api-access-smrpj") pod "91fbfb7d-ff4b-4d4e-9faf-dff333949712" (UID: "91fbfb7d-ff4b-4d4e-9faf-dff333949712"). InnerVolumeSpecName "kube-api-access-smrpj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442060 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48dd1fea-2fab-4318-81f4-38c3a21eba22-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442097 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xdqt\" (UniqueName: \"kubernetes.io/projected/48dd1fea-2fab-4318-81f4-38c3a21eba22-kube-api-access-5xdqt\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442114 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/926e4c07-ca1a-4f2f-9027-2e92f46eed39-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442128 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbpg8\" (UniqueName: \"kubernetes.io/projected/926e4c07-ca1a-4f2f-9027-2e92f46eed39-kube-api-access-nbpg8\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442141 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442154 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzjvf\" (UniqueName: \"kubernetes.io/projected/e17a9ac9-bf21-4ea7-8801-8108d36d4aae-kube-api-access-pzjvf\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442167 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91fbfb7d-ff4b-4d4e-9faf-dff333949712-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.442182 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smrpj\" (UniqueName: \"kubernetes.io/projected/91fbfb7d-ff4b-4d4e-9faf-dff333949712-kube-api-access-smrpj\") on node \"crc\" DevicePath \"\"" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.737844 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-nsk8m" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.737922 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nsk8m" event={"ID":"926e4c07-ca1a-4f2f-9027-2e92f46eed39","Type":"ContainerDied","Data":"d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da"} Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.738668 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d926caae945d4faa72bc7fe585911f8ef9c603358f180573dc756b11b15cb6da" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.739531 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c2x7n" event={"ID":"e17a9ac9-bf21-4ea7-8801-8108d36d4aae","Type":"ContainerDied","Data":"25b6b242c5e85fa5196d48621acb061efe076d21b8ae02d3b6503301b1a6ca5d"} Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.739583 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25b6b242c5e85fa5196d48621acb061efe076d21b8ae02d3b6503301b1a6ca5d" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.739671 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c2x7n" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.741019 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8lnb" event={"ID":"91fbfb7d-ff4b-4d4e-9faf-dff333949712","Type":"ContainerDied","Data":"37e5d213c97c6544ecd076b2066cd1408f2cbbae572857e954de116e355dc86a"} Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.741047 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37e5d213c97c6544ecd076b2066cd1408f2cbbae572857e954de116e355dc86a" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.741103 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8lnb" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.753048 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8b2d-account-create-update-jldnb" event={"ID":"48dd1fea-2fab-4318-81f4-38c3a21eba22","Type":"ContainerDied","Data":"2f508e06c0347b2b50384d118f32ea165b6465cd95c45a2117dd7d76dc1d23a1"} Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.753084 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f508e06c0347b2b50384d118f32ea165b6465cd95c45a2117dd7d76dc1d23a1" Jan 22 16:22:59 crc kubenswrapper[5032]: I0122 16:22:59.753290 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8b2d-account-create-update-jldnb" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.339415 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.347191 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.360129 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts\") pod \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.360234 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86q2m\" (UniqueName: \"kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m\") pod \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\" (UID: \"981bbcc4-7806-4b54-956f-e686bbf0c7f1\") " Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.360630 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "981bbcc4-7806-4b54-956f-e686bbf0c7f1" (UID: "981bbcc4-7806-4b54-956f-e686bbf0c7f1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.361029 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/981bbcc4-7806-4b54-956f-e686bbf0c7f1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.365029 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m" (OuterVolumeSpecName: "kube-api-access-86q2m") pod "981bbcc4-7806-4b54-956f-e686bbf0c7f1" (UID: "981bbcc4-7806-4b54-956f-e686bbf0c7f1"). InnerVolumeSpecName "kube-api-access-86q2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.462113 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nr7l4\" (UniqueName: \"kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4\") pod \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.462358 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts\") pod \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\" (UID: \"64ab4105-77f7-4ee0-95a9-9e8a08acd357\") " Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.462755 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "64ab4105-77f7-4ee0-95a9-9e8a08acd357" (UID: "64ab4105-77f7-4ee0-95a9-9e8a08acd357"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.463116 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64ab4105-77f7-4ee0-95a9-9e8a08acd357-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.463146 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86q2m\" (UniqueName: \"kubernetes.io/projected/981bbcc4-7806-4b54-956f-e686bbf0c7f1-kube-api-access-86q2m\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.465553 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4" (OuterVolumeSpecName: "kube-api-access-nr7l4") pod "64ab4105-77f7-4ee0-95a9-9e8a08acd357" (UID: "64ab4105-77f7-4ee0-95a9-9e8a08acd357"). InnerVolumeSpecName "kube-api-access-nr7l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.564386 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nr7l4\" (UniqueName: \"kubernetes.io/projected/64ab4105-77f7-4ee0-95a9-9e8a08acd357-kube-api-access-nr7l4\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.761260 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-96eb-account-create-update-cms95" event={"ID":"64ab4105-77f7-4ee0-95a9-9e8a08acd357","Type":"ContainerDied","Data":"a0b1cb1d6f76b6c72d74a4b38ccbcc79ce9da6eae5f4704d498208bcde23228b"} Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.761296 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0b1cb1d6f76b6c72d74a4b38ccbcc79ce9da6eae5f4704d498208bcde23228b" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.761310 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-96eb-account-create-update-cms95" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.763195 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fcb7-account-create-update-xphbq" event={"ID":"981bbcc4-7806-4b54-956f-e686bbf0c7f1","Type":"ContainerDied","Data":"b30f5cf5052c869f00dc9a4e4b0c676f8c61253fcb6042634b6ca18232abd61e"} Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.763231 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b30f5cf5052c869f00dc9a4e4b0c676f8c61253fcb6042634b6ca18232abd61e" Jan 22 16:23:00 crc kubenswrapper[5032]: I0122 16:23:00.763254 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-fcb7-account-create-update-xphbq" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.207420 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-8rk6c" podUID="b60d899c-ce17-4f38-97ea-533ae4e56597" containerName="ovn-controller" probeResult="failure" output=< Jan 22 16:23:01 crc kubenswrapper[5032]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 22 16:23:01 crc kubenswrapper[5032]: > Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.335527 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.348983 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-7s9l4" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593112 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8rk6c-config-s459j"] Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593424 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="926e4c07-ca1a-4f2f-9027-2e92f46eed39" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593442 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="926e4c07-ca1a-4f2f-9027-2e92f46eed39" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593453 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91fbfb7d-ff4b-4d4e-9faf-dff333949712" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593460 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="91fbfb7d-ff4b-4d4e-9faf-dff333949712" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593473 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e17a9ac9-bf21-4ea7-8801-8108d36d4aae" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593479 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e17a9ac9-bf21-4ea7-8801-8108d36d4aae" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593490 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="981bbcc4-7806-4b54-956f-e686bbf0c7f1" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593497 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="981bbcc4-7806-4b54-956f-e686bbf0c7f1" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593510 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ab4105-77f7-4ee0-95a9-9e8a08acd357" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593517 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ab4105-77f7-4ee0-95a9-9e8a08acd357" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593538 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48dd1fea-2fab-4318-81f4-38c3a21eba22" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593547 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="48dd1fea-2fab-4318-81f4-38c3a21eba22" 
containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: E0122 16:23:01.593561 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dfcd49a-aa2e-43d7-bec5-f34aaf361118" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593569 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dfcd49a-aa2e-43d7-bec5-f34aaf361118" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593712 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="926e4c07-ca1a-4f2f-9027-2e92f46eed39" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593724 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="981bbcc4-7806-4b54-956f-e686bbf0c7f1" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593733 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ab4105-77f7-4ee0-95a9-9e8a08acd357" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593741 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dfcd49a-aa2e-43d7-bec5-f34aaf361118" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593752 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="91fbfb7d-ff4b-4d4e-9faf-dff333949712" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593760 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="e17a9ac9-bf21-4ea7-8801-8108d36d4aae" containerName="mariadb-database-create" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.593768 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="48dd1fea-2fab-4318-81f4-38c3a21eba22" containerName="mariadb-account-create-update" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.594278 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.598422 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.623989 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8rk6c-config-s459j"] Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.681844 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.681946 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.681980 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.682006 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fqv4\" (UniqueName: \"kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.682034 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.682051 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783502 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fqv4\" (UniqueName: \"kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783567 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783610 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783692 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783751 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.783787 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.785910 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.786466 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.786521 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.786556 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.787347 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.803688 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fqv4\" (UniqueName: \"kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4\") pod \"ovn-controller-8rk6c-config-s459j\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:01 crc kubenswrapper[5032]: I0122 16:23:01.927394 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.376569 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8rk6c-config-s459j"] Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.777479 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c-config-s459j" event={"ID":"11bdeb30-6ae7-433c-9eea-f46d3da88d05","Type":"ContainerStarted","Data":"e985e3d1e26a7732fcb864c735e48a4fea64c973ee37f49d9a6688ef675375d3"} Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.777829 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c-config-s459j" event={"ID":"11bdeb30-6ae7-433c-9eea-f46d3da88d05","Type":"ContainerStarted","Data":"b491bc3a842047bba2ac9fdd065a5757e5424975dd4cb530203b53dd73478946"} Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.778940 5032 generic.go:334] "Generic (PLEG): container finished" podID="14ae4f20-7067-4eec-b6da-d7e6dce57195" containerID="4673fc46b9d9ee219afe6f767ed98fc09f9819f92974db21cf21ece783b2879a" exitCode=0 Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.778990 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v5spw" event={"ID":"14ae4f20-7067-4eec-b6da-d7e6dce57195","Type":"ContainerDied","Data":"4673fc46b9d9ee219afe6f767ed98fc09f9819f92974db21cf21ece783b2879a"} Jan 22 16:23:02 crc kubenswrapper[5032]: I0122 16:23:02.791512 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8rk6c-config-s459j" podStartSLOduration=1.791486465 podStartE2EDuration="1.791486465s" podCreationTimestamp="2026-01-22 16:23:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:02.791400413 +0000 UTC m=+1086.603569454" watchObservedRunningTime="2026-01-22 16:23:02.791486465 +0000 UTC m=+1086.603655496" Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.823074 5032 generic.go:334] "Generic (PLEG): container finished" podID="11bdeb30-6ae7-433c-9eea-f46d3da88d05" containerID="e985e3d1e26a7732fcb864c735e48a4fea64c973ee37f49d9a6688ef675375d3" exitCode=0 Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.823760 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c-config-s459j" event={"ID":"11bdeb30-6ae7-433c-9eea-f46d3da88d05","Type":"ContainerDied","Data":"e985e3d1e26a7732fcb864c735e48a4fea64c973ee37f49d9a6688ef675375d3"} Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.874906 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kpmvg"] Jan 22 16:23:03 crc kubenswrapper[5032]: 
I0122 16:23:03.880242 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kpmvg"] Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.902691 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-7lw6c"] Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.903617 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.906882 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.913234 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-7lw6c"] Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.924612 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:03 crc kubenswrapper[5032]: I0122 16:23:03.924824 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcr4l\" (UniqueName: \"kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.026323 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcr4l\" (UniqueName: \"kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.026396 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.027188 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.048436 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcr4l\" (UniqueName: \"kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l\") pod \"root-account-create-update-7lw6c\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.206140 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.228890 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz6rd\" (UniqueName: \"kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.228978 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.229071 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.229107 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.229148 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.229261 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.229330 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts\") pod \"14ae4f20-7067-4eec-b6da-d7e6dce57195\" (UID: \"14ae4f20-7067-4eec-b6da-d7e6dce57195\") " Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.230357 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.230372 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.232635 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.242129 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd" (OuterVolumeSpecName: "kube-api-access-zz6rd") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "kube-api-access-zz6rd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.269412 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts" (OuterVolumeSpecName: "scripts") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.271944 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.285327 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331807 5032 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331833 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331844 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz6rd\" (UniqueName: \"kubernetes.io/projected/14ae4f20-7067-4eec-b6da-d7e6dce57195-kube-api-access-zz6rd\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331866 5032 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/14ae4f20-7067-4eec-b6da-d7e6dce57195-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331876 5032 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.331883 5032 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/14ae4f20-7067-4eec-b6da-d7e6dce57195-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.337067 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14ae4f20-7067-4eec-b6da-d7e6dce57195" (UID: "14ae4f20-7067-4eec-b6da-d7e6dce57195"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.432884 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ae4f20-7067-4eec-b6da-d7e6dce57195-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.468625 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dfcd49a-aa2e-43d7-bec5-f34aaf361118" path="/var/lib/kubelet/pods/9dfcd49a-aa2e-43d7-bec5-f34aaf361118/volumes" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.701143 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-7lw6c"] Jan 22 16:23:04 crc kubenswrapper[5032]: W0122 16:23:04.701996 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc50ae02a_9b33_400e_963a_aaa815cd061b.slice/crio-76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34 WatchSource:0}: Error finding container 76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34: Status 404 returned error can't find the container with id 76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34 Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.832391 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v5spw" event={"ID":"14ae4f20-7067-4eec-b6da-d7e6dce57195","Type":"ContainerDied","Data":"2fe6d0836b5684e9107b63b352e5390519b74b4c1e7518b3b09a84e7364dedbf"} Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.832456 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fe6d0836b5684e9107b63b352e5390519b74b4c1e7518b3b09a84e7364dedbf" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.832504 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v5spw" Jan 22 16:23:04 crc kubenswrapper[5032]: I0122 16:23:04.837201 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7lw6c" event={"ID":"c50ae02a-9b33-400e-963a-aaa815cd061b","Type":"ContainerStarted","Data":"76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34"} Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.162556 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.247959 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248366 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248071 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248465 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fqv4\" (UniqueName: \"kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248479 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run" (OuterVolumeSpecName: "var-run") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248532 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248643 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248693 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts\") pod \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\" (UID: \"11bdeb30-6ae7-433c-9eea-f46d3da88d05\") " Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.248803 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.249083 5032 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.249105 5032 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.249115 5032 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/11bdeb30-6ae7-433c-9eea-f46d3da88d05-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.249556 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.249679 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts" (OuterVolumeSpecName: "scripts") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.254831 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4" (OuterVolumeSpecName: "kube-api-access-7fqv4") pod "11bdeb30-6ae7-433c-9eea-f46d3da88d05" (UID: "11bdeb30-6ae7-433c-9eea-f46d3da88d05"). InnerVolumeSpecName "kube-api-access-7fqv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.350481 5032 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.350520 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fqv4\" (UniqueName: \"kubernetes.io/projected/11bdeb30-6ae7-433c-9eea-f46d3da88d05-kube-api-access-7fqv4\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.350532 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/11bdeb30-6ae7-433c-9eea-f46d3da88d05-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.846967 5032 generic.go:334] "Generic (PLEG): container finished" podID="c50ae02a-9b33-400e-963a-aaa815cd061b" containerID="f2b8db2f9f32bd8ef318a34f79bc0d86746744be0801a396c7c45e264aa82478" exitCode=0 Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.847050 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7lw6c" event={"ID":"c50ae02a-9b33-400e-963a-aaa815cd061b","Type":"ContainerDied","Data":"f2b8db2f9f32bd8ef318a34f79bc0d86746744be0801a396c7c45e264aa82478"} Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.848625 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8rk6c-config-s459j" event={"ID":"11bdeb30-6ae7-433c-9eea-f46d3da88d05","Type":"ContainerDied","Data":"b491bc3a842047bba2ac9fdd065a5757e5424975dd4cb530203b53dd73478946"} Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.848732 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b491bc3a842047bba2ac9fdd065a5757e5424975dd4cb530203b53dd73478946" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.848663 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8rk6c-config-s459j" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.890138 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-vjptd"] Jan 22 16:23:05 crc kubenswrapper[5032]: E0122 16:23:05.890455 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11bdeb30-6ae7-433c-9eea-f46d3da88d05" containerName="ovn-config" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.890471 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="11bdeb30-6ae7-433c-9eea-f46d3da88d05" containerName="ovn-config" Jan 22 16:23:05 crc kubenswrapper[5032]: E0122 16:23:05.890489 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ae4f20-7067-4eec-b6da-d7e6dce57195" containerName="swift-ring-rebalance" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.890496 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ae4f20-7067-4eec-b6da-d7e6dce57195" containerName="swift-ring-rebalance" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.890640 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="11bdeb30-6ae7-433c-9eea-f46d3da88d05" containerName="ovn-config" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.890655 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="14ae4f20-7067-4eec-b6da-d7e6dce57195" containerName="swift-ring-rebalance" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.891128 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.892995 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.894695 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-l9wcj" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.902536 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vjptd"] Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.952673 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8rk6c-config-s459j"] Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.960753 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.960812 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.960911 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.960932 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-672gp\" (UniqueName: \"kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:05 crc kubenswrapper[5032]: I0122 16:23:05.961981 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8rk6c-config-s459j"] Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.061814 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.061889 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.061967 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.061996 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-672gp\" (UniqueName: \"kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.066765 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.080433 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.080776 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.083479 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-672gp\" (UniqueName: \"kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp\") pod \"glance-db-sync-vjptd\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.192959 5032 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-8rk6c" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.204865 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.476926 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11bdeb30-6ae7-433c-9eea-f46d3da88d05" path="/var/lib/kubelet/pods/11bdeb30-6ae7-433c-9eea-f46d3da88d05/volumes" Jan 22 16:23:06 crc kubenswrapper[5032]: I0122 16:23:06.853447 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vjptd"] Jan 22 16:23:06 crc kubenswrapper[5032]: W0122 16:23:06.857856 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8f0b2b5_fc74_4525_bf25_5d00abc5c89c.slice/crio-40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d WatchSource:0}: Error finding container 40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d: Status 404 returned error can't find the container with id 40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.199831 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.287068 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcr4l\" (UniqueName: \"kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l\") pod \"c50ae02a-9b33-400e-963a-aaa815cd061b\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.287198 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts\") pod \"c50ae02a-9b33-400e-963a-aaa815cd061b\" (UID: \"c50ae02a-9b33-400e-963a-aaa815cd061b\") " Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.288118 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c50ae02a-9b33-400e-963a-aaa815cd061b" (UID: "c50ae02a-9b33-400e-963a-aaa815cd061b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.295084 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l" (OuterVolumeSpecName: "kube-api-access-dcr4l") pod "c50ae02a-9b33-400e-963a-aaa815cd061b" (UID: "c50ae02a-9b33-400e-963a-aaa815cd061b"). InnerVolumeSpecName "kube-api-access-dcr4l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.389500 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50ae02a-9b33-400e-963a-aaa815cd061b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.389537 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcr4l\" (UniqueName: \"kubernetes.io/projected/c50ae02a-9b33-400e-963a-aaa815cd061b-kube-api-access-dcr4l\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.864133 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vjptd" event={"ID":"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c","Type":"ContainerStarted","Data":"40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d"} Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.865613 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7lw6c" event={"ID":"c50ae02a-9b33-400e-963a-aaa815cd061b","Type":"ContainerDied","Data":"76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34"} Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.865663 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76129ff04ce30aa92131166c64bf5ae8c6ecf066580775d85f855419aa708b34" Jan 22 16:23:07 crc kubenswrapper[5032]: I0122 16:23:07.865694 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7lw6c" Jan 22 16:23:10 crc kubenswrapper[5032]: I0122 16:23:10.447150 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:23:10 crc kubenswrapper[5032]: I0122 16:23:10.454326 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2f2285d-ead9-458a-a0ef-ed666b86e12f-etc-swift\") pod \"swift-storage-0\" (UID: \"a2f2285d-ead9-458a-a0ef-ed666b86e12f\") " pod="openstack/swift-storage-0" Jan 22 16:23:10 crc kubenswrapper[5032]: I0122 16:23:10.614056 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 22 16:23:11 crc kubenswrapper[5032]: I0122 16:23:11.245004 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 16:23:11 crc kubenswrapper[5032]: I0122 16:23:11.909900 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"972a2b25a45890b1d3dea235db191d165f7e5f5e3012d9d8e611b496706b300c"} Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.304066 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.560866 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-lvq7q"] Jan 22 16:23:12 crc kubenswrapper[5032]: E0122 16:23:12.561449 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50ae02a-9b33-400e-963a-aaa815cd061b" containerName="mariadb-account-create-update" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.561461 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50ae02a-9b33-400e-963a-aaa815cd061b" containerName="mariadb-account-create-update" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.561615 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50ae02a-9b33-400e-963a-aaa815cd061b" containerName="mariadb-account-create-update" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.563234 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.588607 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lvq7q"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.607057 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.690765 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqcwf\" (UniqueName: \"kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.690822 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.693970 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-jkm9n"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.694906 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.711953 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-11dc-account-create-update-n66d9"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.712793 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.718132 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.726305 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jkm9n"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.733865 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-11dc-account-create-update-n66d9"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.793661 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.793849 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqcwf\" (UniqueName: \"kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.795062 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.838095 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqcwf\" (UniqueName: \"kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf\") pod \"cinder-db-create-lvq7q\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.893050 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.895249 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzvjp\" (UniqueName: \"kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.895310 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.895360 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9824\" (UniqueName: \"kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.895447 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.924002 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d2fb-account-create-update-96qf2"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.933579 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.940264 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.964446 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d2fb-account-create-update-96qf2"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.976007 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-8qf6r"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.978084 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.992681 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8qf6r"] Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996704 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzvjp\" (UniqueName: \"kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996747 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996800 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9824\" (UniqueName: \"kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996821 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pcr2\" (UniqueName: \"kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996908 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.996938 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.997827 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:12 crc kubenswrapper[5032]: I0122 16:23:12.998198 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:13 crc kubenswrapper[5032]: 
I0122 16:23:13.019563 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzvjp\" (UniqueName: \"kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp\") pod \"cinder-11dc-account-create-update-n66d9\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.037544 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9824\" (UniqueName: \"kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824\") pod \"barbican-db-create-jkm9n\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.048990 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.089905 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-15b8-account-create-update-tk2d2"] Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.091073 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.095822 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.098013 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pcr2\" (UniqueName: \"kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.098124 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.098157 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsvxv\" (UniqueName: \"kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.098187 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.098893 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " 
pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.100234 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-9zk6w"] Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.102145 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.106462 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.106639 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wncsb" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.106668 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.106931 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.122737 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-9zk6w"] Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.124194 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pcr2\" (UniqueName: \"kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2\") pod \"barbican-d2fb-account-create-update-96qf2\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.135525 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-15b8-account-create-update-tk2d2"] Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199132 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199267 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h528\" (UniqueName: \"kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199410 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199452 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199588 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-tsvxv\" (UniqueName: \"kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199646 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.199672 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdrfm\" (UniqueName: \"kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.200312 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.221313 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsvxv\" (UniqueName: \"kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv\") pod \"neutron-db-create-8qf6r\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.274455 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.296712 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.300590 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h528\" (UniqueName: \"kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.300659 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.300689 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.300750 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdrfm\" (UniqueName: \"kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.300783 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.301558 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.305874 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.306527 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.312511 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.315308 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdrfm\" (UniqueName: \"kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm\") pod \"neutron-15b8-account-create-update-tk2d2\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.317503 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h528\" (UniqueName: \"kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528\") pod \"keystone-db-sync-9zk6w\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.405193 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:13 crc kubenswrapper[5032]: I0122 16:23:13.422388 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.342990 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-9zk6w"] Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.391154 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jkm9n"] Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.399802 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d2fb-account-create-update-96qf2"] Jan 22 16:23:22 crc kubenswrapper[5032]: W0122 16:23:22.415708 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod527109ed_e0d3_45c7_b9b4_ae78b90e28d3.slice/crio-4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b WatchSource:0}: Error finding container 4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b: Status 404 returned error can't find the container with id 4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b Jan 22 16:23:22 crc kubenswrapper[5032]: W0122 16:23:22.417023 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ebb92cf_7cfa_4d87_995f_eef5fbd8ec53.slice/crio-79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c WatchSource:0}: Error finding container 79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c: Status 404 returned error can't find the container with id 79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.419944 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8qf6r"] Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.428513 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-15b8-account-create-update-tk2d2"] Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.440115 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lvq7q"] Jan 22 16:23:22 crc kubenswrapper[5032]: W0122 16:23:22.444902 5032 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09a04ba4_1451_4a64_9f1a_4f06fa96c352.slice/crio-8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86 WatchSource:0}: Error finding container 8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86: Status 404 returned error can't find the container with id 8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86 Jan 22 16:23:22 crc kubenswrapper[5032]: I0122 16:23:22.540665 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-11dc-account-create-update-n66d9"] Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.014267 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8qf6r" event={"ID":"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53","Type":"ContainerStarted","Data":"97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.014663 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8qf6r" event={"ID":"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53","Type":"ContainerStarted","Data":"79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.016355 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"0a63dfd917c922f5005be606a8825a1ff1efb3aa1c22272555ab6266f9beedbe"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.016378 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"b75f38e1d5794bdd51cc60ad75dee892e48bfec023081ee402034d18747494af"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.016387 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"1b9059dc4e0bde8d41a9addcd0cee25cd5e1e6d32827cb57e5c28034947bf563"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.016396 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"94daf0968e95925e0ce7217dfcdf55cc3942002ee2e486edf331924f4b5ad280"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.018038 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lvq7q" event={"ID":"09a04ba4-1451-4a64-9f1a-4f06fa96c352","Type":"ContainerStarted","Data":"e2bf8296ca86e8d3119644f07167aa50ba55dcddcfddf9d8a0f16b99461e7370"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.018062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lvq7q" event={"ID":"09a04ba4-1451-4a64-9f1a-4f06fa96c352","Type":"ContainerStarted","Data":"8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.020361 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vjptd" event={"ID":"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c","Type":"ContainerStarted","Data":"11843ef879c0aee64b416fe7c872b45eadec752543a0f84304c2801c8816f927"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.022192 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jkm9n" 
event={"ID":"527109ed-e0d3-45c7-b9b4-ae78b90e28d3","Type":"ContainerStarted","Data":"20a77bc4b7f46522e4d6e6b01923b2f208f37916ac1495cb697dc3f9d3e6299b"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.022211 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jkm9n" event={"ID":"527109ed-e0d3-45c7-b9b4-ae78b90e28d3","Type":"ContainerStarted","Data":"4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.023795 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9zk6w" event={"ID":"300400f9-f92b-4159-982c-310398e166c8","Type":"ContainerStarted","Data":"bf8af30341ec149b58b5d6f5e20f820d18bd3f53af314cf5d1e0cc72edad7d35"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.029719 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-15b8-account-create-update-tk2d2" event={"ID":"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b","Type":"ContainerStarted","Data":"a6a095be6930dff471c0024b2ae1b31cbb75fd7cfdb5e65c1b672ea125fed651"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.029765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-15b8-account-create-update-tk2d2" event={"ID":"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b","Type":"ContainerStarted","Data":"b85a17d012003c7745b1aa842f49d8caf430b40ff8a6b101ac5fbe3d12c9a0b3"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.044608 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-11dc-account-create-update-n66d9" event={"ID":"1cd3c01c-a661-4710-81fe-92a4b7af8073","Type":"ContainerStarted","Data":"821a6564675ec50faf16ea7cc5a0fc3fa26aeb191eb526ad55e028df360200a9"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.044654 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-11dc-account-create-update-n66d9" event={"ID":"1cd3c01c-a661-4710-81fe-92a4b7af8073","Type":"ContainerStarted","Data":"c2fca3851cd8b2e82c6bf881985c01a66a2f5c662832b5492f2dc72fb15c7209"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.044675 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-8qf6r" podStartSLOduration=11.044653149 podStartE2EDuration="11.044653149s" podCreationTimestamp="2026-01-22 16:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.029843813 +0000 UTC m=+1106.842012844" watchObservedRunningTime="2026-01-22 16:23:23.044653149 +0000 UTC m=+1106.856822180" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.051795 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d2fb-account-create-update-96qf2" event={"ID":"aebc03fb-af9a-4652-8649-d8e2275faa46","Type":"ContainerStarted","Data":"8aee5ed8e0588ab1fa8d0fbdfd8a61d020ff2d997763e44fd9470465c0197cbf"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.051849 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d2fb-account-create-update-96qf2" event={"ID":"aebc03fb-af9a-4652-8649-d8e2275faa46","Type":"ContainerStarted","Data":"c338d57a679186b429b5cf6dbedd10e01ced2cfe36dc1d66a6563f7f5b89428e"} Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.076197 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-lvq7q" podStartSLOduration=11.076175552 podStartE2EDuration="11.076175552s" 
podCreationTimestamp="2026-01-22 16:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.049110668 +0000 UTC m=+1106.861279719" watchObservedRunningTime="2026-01-22 16:23:23.076175552 +0000 UTC m=+1106.888344593" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.090262 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-jkm9n" podStartSLOduration=11.090244148 podStartE2EDuration="11.090244148s" podCreationTimestamp="2026-01-22 16:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.064002366 +0000 UTC m=+1106.876171417" watchObservedRunningTime="2026-01-22 16:23:23.090244148 +0000 UTC m=+1106.902413179" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.093599 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-vjptd" podStartSLOduration=3.179363576 podStartE2EDuration="18.093591807s" podCreationTimestamp="2026-01-22 16:23:05 +0000 UTC" firstStartedPulling="2026-01-22 16:23:06.861090824 +0000 UTC m=+1090.673259855" lastFinishedPulling="2026-01-22 16:23:21.775319055 +0000 UTC m=+1105.587488086" observedRunningTime="2026-01-22 16:23:23.086835847 +0000 UTC m=+1106.899004868" watchObservedRunningTime="2026-01-22 16:23:23.093591807 +0000 UTC m=+1106.905760838" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.104331 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-15b8-account-create-update-tk2d2" podStartSLOduration=10.104309474 podStartE2EDuration="10.104309474s" podCreationTimestamp="2026-01-22 16:23:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.10078496 +0000 UTC m=+1106.912954001" watchObservedRunningTime="2026-01-22 16:23:23.104309474 +0000 UTC m=+1106.916478515" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.118675 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-11dc-account-create-update-n66d9" podStartSLOduration=11.118654837 podStartE2EDuration="11.118654837s" podCreationTimestamp="2026-01-22 16:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.114549187 +0000 UTC m=+1106.926718218" watchObservedRunningTime="2026-01-22 16:23:23.118654837 +0000 UTC m=+1106.930823858" Jan 22 16:23:23 crc kubenswrapper[5032]: I0122 16:23:23.134746 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d2fb-account-create-update-96qf2" podStartSLOduration=11.134728357 podStartE2EDuration="11.134728357s" podCreationTimestamp="2026-01-22 16:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:23.129535718 +0000 UTC m=+1106.941704749" watchObservedRunningTime="2026-01-22 16:23:23.134728357 +0000 UTC m=+1106.946897388" Jan 22 16:23:23 crc kubenswrapper[5032]: E0122 16:23:23.325628 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ebb92cf_7cfa_4d87_995f_eef5fbd8ec53.slice/crio-conmon-97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod527109ed_e0d3_45c7_b9b4_ae78b90e28d3.slice/crio-20a77bc4b7f46522e4d6e6b01923b2f208f37916ac1495cb697dc3f9d3e6299b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ebb92cf_7cfa_4d87_995f_eef5fbd8ec53.slice/crio-97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.060429 5032 generic.go:334] "Generic (PLEG): container finished" podID="1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" containerID="97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.060536 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8qf6r" event={"ID":"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53","Type":"ContainerDied","Data":"97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0"} Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.063254 5032 generic.go:334] "Generic (PLEG): container finished" podID="99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" containerID="a6a095be6930dff471c0024b2ae1b31cbb75fd7cfdb5e65c1b672ea125fed651" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.063369 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-15b8-account-create-update-tk2d2" event={"ID":"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b","Type":"ContainerDied","Data":"a6a095be6930dff471c0024b2ae1b31cbb75fd7cfdb5e65c1b672ea125fed651"} Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.070566 5032 generic.go:334] "Generic (PLEG): container finished" podID="1cd3c01c-a661-4710-81fe-92a4b7af8073" containerID="821a6564675ec50faf16ea7cc5a0fc3fa26aeb191eb526ad55e028df360200a9" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.072917 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-11dc-account-create-update-n66d9" event={"ID":"1cd3c01c-a661-4710-81fe-92a4b7af8073","Type":"ContainerDied","Data":"821a6564675ec50faf16ea7cc5a0fc3fa26aeb191eb526ad55e028df360200a9"} Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.076381 5032 generic.go:334] "Generic (PLEG): container finished" podID="aebc03fb-af9a-4652-8649-d8e2275faa46" containerID="8aee5ed8e0588ab1fa8d0fbdfd8a61d020ff2d997763e44fd9470465c0197cbf" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.076447 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d2fb-account-create-update-96qf2" event={"ID":"aebc03fb-af9a-4652-8649-d8e2275faa46","Type":"ContainerDied","Data":"8aee5ed8e0588ab1fa8d0fbdfd8a61d020ff2d997763e44fd9470465c0197cbf"} Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.078115 5032 generic.go:334] "Generic (PLEG): container finished" podID="527109ed-e0d3-45c7-b9b4-ae78b90e28d3" containerID="20a77bc4b7f46522e4d6e6b01923b2f208f37916ac1495cb697dc3f9d3e6299b" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.078134 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jkm9n" 
event={"ID":"527109ed-e0d3-45c7-b9b4-ae78b90e28d3","Type":"ContainerDied","Data":"20a77bc4b7f46522e4d6e6b01923b2f208f37916ac1495cb697dc3f9d3e6299b"} Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.079758 5032 generic.go:334] "Generic (PLEG): container finished" podID="09a04ba4-1451-4a64-9f1a-4f06fa96c352" containerID="e2bf8296ca86e8d3119644f07167aa50ba55dcddcfddf9d8a0f16b99461e7370" exitCode=0 Jan 22 16:23:24 crc kubenswrapper[5032]: I0122 16:23:24.079811 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lvq7q" event={"ID":"09a04ba4-1451-4a64-9f1a-4f06fa96c352","Type":"ContainerDied","Data":"e2bf8296ca86e8d3119644f07167aa50ba55dcddcfddf9d8a0f16b99461e7370"} Jan 22 16:23:25 crc kubenswrapper[5032]: I0122 16:23:25.098734 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"2cfbf25bb965d735c085bbf9a4efdb5753e92e0dff780adb93648a48f9f13c26"} Jan 22 16:23:25 crc kubenswrapper[5032]: I0122 16:23:25.099143 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"de6a45faab5f414e46ba87f1970ba8e62f3123a1494ba408048f856a1ae7c218"} Jan 22 16:23:26 crc kubenswrapper[5032]: I0122 16:23:26.134239 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"3e195dbfba27a8d13243c58c4a30a1cf7b1e52c1e6dc76f43f1e9419b089686e"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.154975 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-11dc-account-create-update-n66d9" event={"ID":"1cd3c01c-a661-4710-81fe-92a4b7af8073","Type":"ContainerDied","Data":"c2fca3851cd8b2e82c6bf881985c01a66a2f5c662832b5492f2dc72fb15c7209"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.155627 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2fca3851cd8b2e82c6bf881985c01a66a2f5c662832b5492f2dc72fb15c7209" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.158038 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d2fb-account-create-update-96qf2" event={"ID":"aebc03fb-af9a-4652-8649-d8e2275faa46","Type":"ContainerDied","Data":"c338d57a679186b429b5cf6dbedd10e01ced2cfe36dc1d66a6563f7f5b89428e"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.158096 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c338d57a679186b429b5cf6dbedd10e01ced2cfe36dc1d66a6563f7f5b89428e" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.160056 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jkm9n" event={"ID":"527109ed-e0d3-45c7-b9b4-ae78b90e28d3","Type":"ContainerDied","Data":"4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.160089 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4dcc5fd688072dcc11c384a9ed4c738998de29e79517deb6496dfd9fb4fd3a1b" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.161811 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8qf6r" event={"ID":"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53","Type":"ContainerDied","Data":"79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c"} Jan 22 16:23:28 crc 
kubenswrapper[5032]: I0122 16:23:28.161867 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79d6a96666924670cd5e843573376d5689eebef550a9e51c8344cce5fb75950c" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.163519 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-15b8-account-create-update-tk2d2" event={"ID":"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b","Type":"ContainerDied","Data":"b85a17d012003c7745b1aa842f49d8caf430b40ff8a6b101ac5fbe3d12c9a0b3"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.163555 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b85a17d012003c7745b1aa842f49d8caf430b40ff8a6b101ac5fbe3d12c9a0b3" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.165235 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lvq7q" event={"ID":"09a04ba4-1451-4a64-9f1a-4f06fa96c352","Type":"ContainerDied","Data":"8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86"} Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.165281 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fd0fa5f5eae34ff099f45cef6645ae10ec37fb67addb7ad1797b17aa225ad86" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.203794 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.233751 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.245018 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.272461 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.283025 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.292752 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.304607 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzvjp\" (UniqueName: \"kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp\") pod \"1cd3c01c-a661-4710-81fe-92a4b7af8073\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.304679 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts\") pod \"1cd3c01c-a661-4710-81fe-92a4b7af8073\" (UID: \"1cd3c01c-a661-4710-81fe-92a4b7af8073\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.312892 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1cd3c01c-a661-4710-81fe-92a4b7af8073" (UID: "1cd3c01c-a661-4710-81fe-92a4b7af8073"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406007 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pcr2\" (UniqueName: \"kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2\") pod \"aebc03fb-af9a-4652-8649-d8e2275faa46\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406042 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsvxv\" (UniqueName: \"kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv\") pod \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406075 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdrfm\" (UniqueName: \"kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm\") pod \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406130 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqcwf\" (UniqueName: \"kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf\") pod \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406152 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts\") pod \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406179 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9824\" (UniqueName: \"kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824\") pod \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\" (UID: \"527109ed-e0d3-45c7-b9b4-ae78b90e28d3\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406247 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts\") pod \"aebc03fb-af9a-4652-8649-d8e2275faa46\" (UID: \"aebc03fb-af9a-4652-8649-d8e2275faa46\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406291 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts\") pod \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\" (UID: \"09a04ba4-1451-4a64-9f1a-4f06fa96c352\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406306 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts\") pod \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\" (UID: \"99ab8480-ed26-4f7e-8086-30d0d1c2cf4b\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406339 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts\") pod \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\" (UID: \"1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53\") " Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406660 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1cd3c01c-a661-4710-81fe-92a4b7af8073-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406981 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aebc03fb-af9a-4652-8649-d8e2275faa46" (UID: "aebc03fb-af9a-4652-8649-d8e2275faa46"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.406985 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" (UID: "1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.407535 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "09a04ba4-1451-4a64-9f1a-4f06fa96c352" (UID: "09a04ba4-1451-4a64-9f1a-4f06fa96c352"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.407621 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "527109ed-e0d3-45c7-b9b4-ae78b90e28d3" (UID: "527109ed-e0d3-45c7-b9b4-ae78b90e28d3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.408258 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" (UID: "99ab8480-ed26-4f7e-8086-30d0d1c2cf4b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.508534 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.508607 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aebc03fb-af9a-4652-8649-d8e2275faa46-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.508633 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09a04ba4-1451-4a64-9f1a-4f06fa96c352-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.508660 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:28 crc kubenswrapper[5032]: I0122 16:23:28.508683 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.007344 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm" (OuterVolumeSpecName: "kube-api-access-bdrfm") pod "99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" (UID: "99ab8480-ed26-4f7e-8086-30d0d1c2cf4b"). InnerVolumeSpecName "kube-api-access-bdrfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.007467 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf" (OuterVolumeSpecName: "kube-api-access-tqcwf") pod "09a04ba4-1451-4a64-9f1a-4f06fa96c352" (UID: "09a04ba4-1451-4a64-9f1a-4f06fa96c352"). InnerVolumeSpecName "kube-api-access-tqcwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.007472 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2" (OuterVolumeSpecName: "kube-api-access-2pcr2") pod "aebc03fb-af9a-4652-8649-d8e2275faa46" (UID: "aebc03fb-af9a-4652-8649-d8e2275faa46"). InnerVolumeSpecName "kube-api-access-2pcr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.007593 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824" (OuterVolumeSpecName: "kube-api-access-f9824") pod "527109ed-e0d3-45c7-b9b4-ae78b90e28d3" (UID: "527109ed-e0d3-45c7-b9b4-ae78b90e28d3"). InnerVolumeSpecName "kube-api-access-f9824". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.007590 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv" (OuterVolumeSpecName: "kube-api-access-tsvxv") pod "1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" (UID: "1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53"). InnerVolumeSpecName "kube-api-access-tsvxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.008198 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp" (OuterVolumeSpecName: "kube-api-access-wzvjp") pod "1cd3c01c-a661-4710-81fe-92a4b7af8073" (UID: "1cd3c01c-a661-4710-81fe-92a4b7af8073"). InnerVolumeSpecName "kube-api-access-wzvjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024141 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pcr2\" (UniqueName: \"kubernetes.io/projected/aebc03fb-af9a-4652-8649-d8e2275faa46-kube-api-access-2pcr2\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024174 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsvxv\" (UniqueName: \"kubernetes.io/projected/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53-kube-api-access-tsvxv\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024184 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdrfm\" (UniqueName: \"kubernetes.io/projected/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b-kube-api-access-bdrfm\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024194 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqcwf\" (UniqueName: \"kubernetes.io/projected/09a04ba4-1451-4a64-9f1a-4f06fa96c352-kube-api-access-tqcwf\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024202 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9824\" (UniqueName: \"kubernetes.io/projected/527109ed-e0d3-45c7-b9b4-ae78b90e28d3-kube-api-access-f9824\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.024211 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzvjp\" (UniqueName: \"kubernetes.io/projected/1cd3c01c-a661-4710-81fe-92a4b7af8073-kube-api-access-wzvjp\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.174694 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9zk6w" event={"ID":"300400f9-f92b-4159-982c-310398e166c8","Type":"ContainerStarted","Data":"fcc49d9fdf4773cd4c396d472b82808c15a0c7090fe3fab93499a48475821aca"} Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.178921 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-11dc-account-create-update-n66d9" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.179009 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"f9cdf1579d873944f4920c1565b1c698796a6e1c254e65fe56f57ac4f4063382"} Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.179051 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lvq7q" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.179055 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-15b8-account-create-update-tk2d2" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.179066 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8qf6r" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.179077 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d2fb-account-create-update-96qf2" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.178999 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jkm9n" Jan 22 16:23:29 crc kubenswrapper[5032]: I0122 16:23:29.209691 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-9zk6w" podStartSLOduration=10.599618365 podStartE2EDuration="16.209671857s" podCreationTimestamp="2026-01-22 16:23:13 +0000 UTC" firstStartedPulling="2026-01-22 16:23:22.390041408 +0000 UTC m=+1106.202210439" lastFinishedPulling="2026-01-22 16:23:28.00009488 +0000 UTC m=+1111.812263931" observedRunningTime="2026-01-22 16:23:29.201274612 +0000 UTC m=+1113.013443653" watchObservedRunningTime="2026-01-22 16:23:29.209671857 +0000 UTC m=+1113.021840888" Jan 22 16:23:31 crc kubenswrapper[5032]: I0122 16:23:31.218158 5032 generic.go:334] "Generic (PLEG): container finished" podID="b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" containerID="11843ef879c0aee64b416fe7c872b45eadec752543a0f84304c2801c8816f927" exitCode=0 Jan 22 16:23:31 crc kubenswrapper[5032]: I0122 16:23:31.218332 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vjptd" event={"ID":"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c","Type":"ContainerDied","Data":"11843ef879c0aee64b416fe7c872b45eadec752543a0f84304c2801c8816f927"} Jan 22 16:23:31 crc kubenswrapper[5032]: I0122 16:23:31.232477 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"117602a5faff9dc5e07ca343ea2077804cba70c074ace1d0360cd5f77402609e"} Jan 22 16:23:31 crc kubenswrapper[5032]: I0122 16:23:31.232527 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"1e912a43b7306dfaedf2fd30b2761a4e4b87052524bfe3a8c18cfd290b3e8c52"} Jan 22 16:23:31 crc kubenswrapper[5032]: I0122 16:23:31.232543 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"06d4c5d49eecc87c14ca64b0cb615c0957662f8934d77e38aea6522f1b1d3dc0"} Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.248669 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"e69201928b5bf20496f352e034c51613f244bf10517fb7c7b6eef2f72aa5f60a"} Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.250404 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"fcfcf6d953878e4237d6e8e2c112ce6fd61628d873e9c5453d5154b137b1474d"} Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.250548 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"6d57da7c7362d87935bbc7c4eae33b11b5f7b9b452970d20c4ef84eb56fa0e1c"} Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.649201 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.812030 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle\") pod \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.812199 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-672gp\" (UniqueName: \"kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp\") pod \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.812498 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data\") pod \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.812778 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data\") pod \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\" (UID: \"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c\") " Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.816939 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" (UID: "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.817226 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp" (OuterVolumeSpecName: "kube-api-access-672gp") pod "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" (UID: "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c"). InnerVolumeSpecName "kube-api-access-672gp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.838012 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" (UID: "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.867820 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data" (OuterVolumeSpecName: "config-data") pod "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" (UID: "b8f0b2b5-fc74-4525-bf25-5d00abc5c89c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.915895 5032 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.915935 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.915949 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:32 crc kubenswrapper[5032]: I0122 16:23:32.915960 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-672gp\" (UniqueName: \"kubernetes.io/projected/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c-kube-api-access-672gp\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.016559 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.016632 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.260684 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vjptd" event={"ID":"b8f0b2b5-fc74-4525-bf25-5d00abc5c89c","Type":"ContainerDied","Data":"40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d"} Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.260729 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40568ab5fff71307cf7f82f48b121ca9136f5b86ca785db78c41da6b3f26184d" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.260786 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-vjptd" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.263994 5032 generic.go:334] "Generic (PLEG): container finished" podID="300400f9-f92b-4159-982c-310398e166c8" containerID="fcc49d9fdf4773cd4c396d472b82808c15a0c7090fe3fab93499a48475821aca" exitCode=0 Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.264032 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9zk6w" event={"ID":"300400f9-f92b-4159-982c-310398e166c8","Type":"ContainerDied","Data":"fcc49d9fdf4773cd4c396d472b82808c15a0c7090fe3fab93499a48475821aca"} Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.601600 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602222 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cd3c01c-a661-4710-81fe-92a4b7af8073" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602239 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cd3c01c-a661-4710-81fe-92a4b7af8073" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602254 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aebc03fb-af9a-4652-8649-d8e2275faa46" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602260 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="aebc03fb-af9a-4652-8649-d8e2275faa46" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602271 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602278 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602290 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602296 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602309 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09a04ba4-1451-4a64-9f1a-4f06fa96c352" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602314 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="09a04ba4-1451-4a64-9f1a-4f06fa96c352" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602334 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="527109ed-e0d3-45c7-b9b4-ae78b90e28d3" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602339 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="527109ed-e0d3-45c7-b9b4-ae78b90e28d3" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: E0122 16:23:33.602363 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" containerName="glance-db-sync" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 
16:23:33.602369 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" containerName="glance-db-sync" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602504 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="aebc03fb-af9a-4652-8649-d8e2275faa46" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602517 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602528 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cd3c01c-a661-4710-81fe-92a4b7af8073" containerName="mariadb-account-create-update" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602538 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" containerName="glance-db-sync" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602551 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="527109ed-e0d3-45c7-b9b4-ae78b90e28d3" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602561 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.602566 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="09a04ba4-1451-4a64-9f1a-4f06fa96c352" containerName="mariadb-database-create" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.603408 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.613639 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.734021 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.734296 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.734378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4pdp\" (UniqueName: \"kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.734575 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " 
pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.734629 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.835762 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4pdp\" (UniqueName: \"kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.835838 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.835875 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.835908 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.836013 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.836684 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.836748 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.836891 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.836972 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.852430 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4pdp\" (UniqueName: \"kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp\") pod \"dnsmasq-dns-74dc88fc-26tvt\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:33 crc kubenswrapper[5032]: I0122 16:23:33.925109 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.761906 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.875534 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h528\" (UniqueName: \"kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528\") pod \"300400f9-f92b-4159-982c-310398e166c8\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.875672 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle\") pod \"300400f9-f92b-4159-982c-310398e166c8\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.875756 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data\") pod \"300400f9-f92b-4159-982c-310398e166c8\" (UID: \"300400f9-f92b-4159-982c-310398e166c8\") " Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.882534 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528" (OuterVolumeSpecName: "kube-api-access-8h528") pod "300400f9-f92b-4159-982c-310398e166c8" (UID: "300400f9-f92b-4159-982c-310398e166c8"). InnerVolumeSpecName "kube-api-access-8h528". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.908189 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "300400f9-f92b-4159-982c-310398e166c8" (UID: "300400f9-f92b-4159-982c-310398e166c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.927202 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data" (OuterVolumeSpecName: "config-data") pod "300400f9-f92b-4159-982c-310398e166c8" (UID: "300400f9-f92b-4159-982c-310398e166c8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.977981 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.978017 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/300400f9-f92b-4159-982c-310398e166c8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:35 crc kubenswrapper[5032]: I0122 16:23:35.978026 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h528\" (UniqueName: \"kubernetes.io/projected/300400f9-f92b-4159-982c-310398e166c8-kube-api-access-8h528\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.032605 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:36 crc kubenswrapper[5032]: W0122 16:23:36.039168 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod031e200b_3cec_464e_83d6_036daf55f777.slice/crio-fbab9aa8912d2b9405dc1a78c6a39753b2ca00c13c10c0e3dcd1df37696f9fd2 WatchSource:0}: Error finding container fbab9aa8912d2b9405dc1a78c6a39753b2ca00c13c10c0e3dcd1df37696f9fd2: Status 404 returned error can't find the container with id fbab9aa8912d2b9405dc1a78c6a39753b2ca00c13c10c0e3dcd1df37696f9fd2 Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.666316 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" event={"ID":"031e200b-3cec-464e-83d6-036daf55f777","Type":"ContainerStarted","Data":"fbab9aa8912d2b9405dc1a78c6a39753b2ca00c13c10c0e3dcd1df37696f9fd2"} Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.668324 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9zk6w" event={"ID":"300400f9-f92b-4159-982c-310398e166c8","Type":"ContainerDied","Data":"bf8af30341ec149b58b5d6f5e20f820d18bd3f53af314cf5d1e0cc72edad7d35"} Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.668348 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf8af30341ec149b58b5d6f5e20f820d18bd3f53af314cf5d1e0cc72edad7d35" Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.668391 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-9zk6w" Jan 22 16:23:36 crc kubenswrapper[5032]: I0122 16:23:36.988447 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.006329 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8w25f"] Jan 22 16:23:37 crc kubenswrapper[5032]: E0122 16:23:37.006665 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300400f9-f92b-4159-982c-310398e166c8" containerName="keystone-db-sync" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.006685 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="300400f9-f92b-4159-982c-310398e166c8" containerName="keystone-db-sync" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.006838 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="300400f9-f92b-4159-982c-310398e166c8" containerName="keystone-db-sync" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.009986 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.014835 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.015253 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.015361 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.015559 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.015703 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wncsb" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.037952 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.039234 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.049968 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8w25f"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.085387 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199658 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szcnn\" (UniqueName: \"kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199698 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199794 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89ftp\" (UniqueName: \"kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199823 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199880 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199945 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.199990 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.200021 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: 
\"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.200044 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.200092 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.200115 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.203867 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-gf5jg"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.204836 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.213016 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-v4dkx" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.213245 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.216517 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.223407 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.226406 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.233585 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-gdsw6" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.233832 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.235491 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.235605 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.236637 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-dxmk2"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.238350 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.249139 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-gf5jg"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.252387 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-tgncl" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.252632 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.252844 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.258838 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.268171 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dxmk2"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302613 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302675 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302710 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302741 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302763 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302780 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302795 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb\") pod 
\"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302830 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szcnn\" (UniqueName: \"kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.302852 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.304287 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89ftp\" (UniqueName: \"kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.304347 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.304561 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.305653 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.308208 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.308944 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.315218 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 
22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.321913 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.323300 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.329368 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.351748 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.353145 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szcnn\" (UniqueName: \"kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn\") pod \"dnsmasq-dns-7d5679f497-7d5gm\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.354230 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89ftp\" (UniqueName: \"kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp\") pod \"keystone-bootstrap-8w25f\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.374044 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.403786 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406049 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrjv4\" (UniqueName: \"kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406107 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406137 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406293 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406363 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406386 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406411 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406634 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzmc2\" (UniqueName: \"kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406822 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406898 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406925 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4fn6\" (UniqueName: \"kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.406949 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.407164 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.407197 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.408108 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.471172 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538115 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538157 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538180 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538197 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538217 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538242 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzmc2\" (UniqueName: \"kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538439 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538452 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538511 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle\") pod \"cinder-db-sync-dxmk2\" (UID: 
\"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.538547 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4fn6\" (UniqueName: \"kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549270 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549487 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549642 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfwpd\" (UniqueName: \"kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549734 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549914 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.550333 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.550544 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrjv4\" (UniqueName: \"kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.550590 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 
16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.550625 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.550655 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.549739 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.551218 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.552344 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.553384 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.553670 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.552179 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.556535 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.556586 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle\") pod 
\"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.563172 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzmc2\" (UniqueName: \"kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2\") pod \"neutron-db-sync-gf5jg\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.565432 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.566912 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.573983 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.594225 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrjv4\" (UniqueName: \"kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4\") pod \"cinder-db-sync-dxmk2\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.602458 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.604672 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.605807 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4fn6\" (UniqueName: \"kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6\") pod \"horizon-6595764dd9-5wn45\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.611787 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.611819 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.640930 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.644922 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652408 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652467 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652486 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652510 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652530 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652565 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652594 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652612 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfwpd\" (UniqueName: \"kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652627 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqrr7\" (UniqueName: \"kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652650 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652676 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.652691 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.653774 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.654009 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.654792 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.656657 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.656663 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.661316 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.661360 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-l9wcj" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.661468 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.665177 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.672584 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.678654 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfwpd\" (UniqueName: \"kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd\") pod \"horizon-75d7ccfbb5-tsmmc\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.705401 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-jb65g"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.707282 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.710454 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.711751 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9fxfm" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.716372 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.728154 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.729556 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.750608 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ng56d"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.751516 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.752322 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754421 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754468 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754499 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754535 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz2k5\" (UniqueName: \"kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754590 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754616 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754646 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqrr7\" (UniqueName: \"kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754694 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw2vw\" (UniqueName: \"kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754733 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc 
kubenswrapper[5032]: I0122 16:23:37.754763 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754784 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754880 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754939 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754963 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.754995 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755041 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhgjs\" (UniqueName: \"kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755089 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755111 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755149 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755172 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.755667 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.756272 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.761280 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xfthf" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.761648 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.762037 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.774032 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-jb65g"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.777898 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.778062 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.778808 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.781985 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ng56d"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.786963 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.788654 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.795725 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.796479 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqrr7\" (UniqueName: \"kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.796648 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data\") pod \"ceilometer-0\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.797053 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.797237 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.846942 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.853295 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858220 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858269 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858287 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858308 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858332 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz2k5\" (UniqueName: 
\"kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858356 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858376 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858396 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858434 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858454 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw2vw\" (UniqueName: \"kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858470 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858490 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858514 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858540 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858565 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858583 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858600 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858615 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54j2t\" (UniqueName: \"kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858635 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhgjs\" (UniqueName: \"kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858658 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.858679 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.859591 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.860139 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb\") pod 
\"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.862332 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.863342 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.865037 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.874213 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.875807 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.881135 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.882032 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jz2k5\" (UniqueName: \"kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.882085 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.883714 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.886430 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhgjs\" (UniqueName: \"kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.887084 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle\") pod \"barbican-db-sync-jb65g\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.888301 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw2vw\" (UniqueName: \"kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw\") pod \"dnsmasq-dns-56798b757f-jcl6r\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.898583 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle\") pod \"placement-db-sync-ng56d\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.947085 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.961757 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.961836 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.961892 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.961925 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964175 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54j2t\" (UniqueName: \"kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964282 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964372 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964462 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964503 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: 
\"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964528 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964555 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964582 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964622 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf84z\" (UniqueName: \"kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964689 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964726 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.964757 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.971620 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.975471 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " 
pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.976245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.979379 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.979508 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.983836 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:37 crc kubenswrapper[5032]: I0122 16:23:37.987723 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.000029 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.003377 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.007996 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-ng56d" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075190 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075234 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075302 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075345 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075366 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075392 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf84z\" (UniqueName: \"kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075445 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075505 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.075641 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Jan 22 
16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.076231 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.076937 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.081384 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.085950 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.086577 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.091063 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.101161 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf84z\" (UniqueName: \"kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.125162 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.178193 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-jb65g" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.268566 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:38 crc kubenswrapper[5032]: W0122 16:23:38.268816 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7808724d_5ed5_4925_8345_80fb4c765340.slice/crio-9ec7cd4634304736c0141870df111495e7242b68c96b878286ec02bf74126847 WatchSource:0}: Error finding container 9ec7cd4634304736c0141870df111495e7242b68c96b878286ec02bf74126847: Status 404 returned error can't find the container with id 9ec7cd4634304736c0141870df111495e7242b68c96b878286ec02bf74126847 Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.336259 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.377625 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54j2t\" (UniqueName: \"kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t\") pod \"glance-default-external-api-0\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " pod="openstack/glance-default-external-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.401149 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8w25f"] Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.491370 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.573719 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.744799 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" event={"ID":"031e200b-3cec-464e-83d6-036daf55f777","Type":"ContainerStarted","Data":"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96"} Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.745240 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" podUID="031e200b-3cec-464e-83d6-036daf55f777" containerName="init" containerID="cri-o://8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96" gracePeriod=10 Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.748228 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75d7ccfbb5-tsmmc" event={"ID":"b761c2e8-3ed5-40ce-8ae8-eb181db52692","Type":"ContainerStarted","Data":"43286fe0004b5b2a1c0853f3d233af9dd48775a99072c0183daf90e5c57858a3"} Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.773613 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8w25f" event={"ID":"5e9e211d-267e-4d76-be04-75a83bf35c14","Type":"ContainerStarted","Data":"73c942a887999f5be7cd708725b26ea57fcb3becda72e6c977803bb164ea284f"} Jan 22 16:23:38 crc kubenswrapper[5032]: I0122 16:23:38.787120 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" event={"ID":"7808724d-5ed5-4925-8345-80fb4c765340","Type":"ContainerStarted","Data":"9ec7cd4634304736c0141870df111495e7242b68c96b878286ec02bf74126847"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.006930 5032 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.039295 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.058332 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-gf5jg"] Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.102808 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.120052 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dxmk2"] Jan 22 16:23:39 crc kubenswrapper[5032]: W0122 16:23:39.123064 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18e81df2_1e39_45a9_9ea9_c9b9a54b92f7.slice/crio-1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68 WatchSource:0}: Error finding container 1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68: Status 404 returned error can't find the container with id 1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.131582 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ng56d"] Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.321424 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-jb65g"] Jan 22 16:23:39 crc kubenswrapper[5032]: W0122 16:23:39.335353 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod175d212f_92e2_468a_ac51_5c821d160842.slice/crio-fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26 WatchSource:0}: Error finding container fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26: Status 404 returned error can't find the container with id fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.423722 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:23:39 crc kubenswrapper[5032]: W0122 16:23:39.478359 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2bb020b_6e25_4841_91db_4bc1a4b21f82.slice/crio-f188b7031d49e5fbc925b926ff0219e7167e430153dee2febd85344dbae973c5 WatchSource:0}: Error finding container f188b7031d49e5fbc925b926ff0219e7167e430153dee2febd85344dbae973c5: Status 404 returned error can't find the container with id f188b7031d49e5fbc925b926ff0219e7167e430153dee2febd85344dbae973c5 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.505089 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:23:39 crc kubenswrapper[5032]: W0122 16:23:39.515746 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa021065_770b_4137_aa62_be9093cccc34.slice/crio-9ca27bcffe8fd25f80ca8f35b7683cfdad3b4c1c96ca40fcec6f598169be63aa WatchSource:0}: Error finding container 9ca27bcffe8fd25f80ca8f35b7683cfdad3b4c1c96ca40fcec6f598169be63aa: Status 404 returned error can't find the container with id 9ca27bcffe8fd25f80ca8f35b7683cfdad3b4c1c96ca40fcec6f598169be63aa Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 
16:23:39.780252 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.807165 5032 generic.go:334] "Generic (PLEG): container finished" podID="031e200b-3cec-464e-83d6-036daf55f777" containerID="8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96" exitCode=0 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.807222 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" event={"ID":"031e200b-3cec-464e-83d6-036daf55f777","Type":"ContainerDied","Data":"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.807233 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.807386 5032 scope.go:117] "RemoveContainer" containerID="8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.807265 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-26tvt" event={"ID":"031e200b-3cec-464e-83d6-036daf55f777","Type":"ContainerDied","Data":"fbab9aa8912d2b9405dc1a78c6a39753b2ca00c13c10c0e3dcd1df37696f9fd2"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.814575 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerStarted","Data":"f188b7031d49e5fbc925b926ff0219e7167e430153dee2febd85344dbae973c5"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.815876 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ng56d" event={"ID":"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7","Type":"ContainerStarted","Data":"1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.829216 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2f2285d-ead9-458a-a0ef-ed666b86e12f","Type":"ContainerStarted","Data":"50294e7938b54d20e8b6316f6abcec133a79f264989a102ca8b812384d8057ee"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.833331 5032 generic.go:334] "Generic (PLEG): container finished" podID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerID="83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5" exitCode=0 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.833409 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" event={"ID":"7f16880c-2d36-4873-b54b-fc7d51ca51cd","Type":"ContainerDied","Data":"83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.833456 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" event={"ID":"7f16880c-2d36-4873-b54b-fc7d51ca51cd","Type":"ContainerStarted","Data":"6a1612159bd4194212611d730964960ce32adfb7b0db099e1f35df91c929299b"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.885682 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=43.685610692 podStartE2EDuration="1m2.885662501s" podCreationTimestamp="2026-01-22 16:22:37 +0000 UTC" firstStartedPulling="2026-01-22 16:23:11.258074194 +0000 UTC 
m=+1095.070243235" lastFinishedPulling="2026-01-22 16:23:30.458126013 +0000 UTC m=+1114.270295044" observedRunningTime="2026-01-22 16:23:39.869424167 +0000 UTC m=+1123.681593188" watchObservedRunningTime="2026-01-22 16:23:39.885662501 +0000 UTC m=+1123.697831532" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.906911 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6595764dd9-5wn45" event={"ID":"da9d98b4-fc68-4ea3-adc0-8992f52076d2","Type":"ContainerStarted","Data":"ff83683634985db7876b08f2040e2287322353dd1c56e8f172b8ec67aa83f763"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.908943 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerStarted","Data":"956b62437028662d752db4b119048ed8a3512baa180a33b3b62fcacafa44becd"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.910626 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerStarted","Data":"9ca27bcffe8fd25f80ca8f35b7683cfdad3b4c1c96ca40fcec6f598169be63aa"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.912150 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8w25f" event={"ID":"5e9e211d-267e-4d76-be04-75a83bf35c14","Type":"ContainerStarted","Data":"174bfa085d80eeecb78b6cadbd42931be570bb7c8eb00a34d30cabd4b1a658a9"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.917314 5032 generic.go:334] "Generic (PLEG): container finished" podID="7808724d-5ed5-4925-8345-80fb4c765340" containerID="f0dea7c490ba8dd87e0d65647dfbd5fa5f0e9cad03c9793173afd61fb4475bce" exitCode=0 Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.917387 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" event={"ID":"7808724d-5ed5-4925-8345-80fb4c765340","Type":"ContainerDied","Data":"f0dea7c490ba8dd87e0d65647dfbd5fa5f0e9cad03c9793173afd61fb4475bce"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.919539 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dxmk2" event={"ID":"857e311d-5f3f-4c81-9f86-6d0a3c9e0307","Type":"ContainerStarted","Data":"4a893f5990a0b852305bf95912792f30a34a167492af638f1311fc8ea5ca6578"} Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.928151 5032 scope.go:117] "RemoveContainer" containerID="8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96" Jan 22 16:23:39 crc kubenswrapper[5032]: E0122 16:23:39.938335 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96\": container with ID starting with 8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96 not found: ID does not exist" containerID="8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.938391 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96"} err="failed to get container status \"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96\": rpc error: code = NotFound desc = could not find container \"8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96\": container with ID starting with 
8f9e6b2b050a0670f834dfab251409b14f8dbb20d8fc6f8882b50cb3ee3b9f96 not found: ID does not exist" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.945339 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc\") pod \"031e200b-3cec-464e-83d6-036daf55f777\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.945398 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4pdp\" (UniqueName: \"kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp\") pod \"031e200b-3cec-464e-83d6-036daf55f777\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.945453 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb\") pod \"031e200b-3cec-464e-83d6-036daf55f777\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.945520 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb\") pod \"031e200b-3cec-464e-83d6-036daf55f777\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.945545 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config\") pod \"031e200b-3cec-464e-83d6-036daf55f777\" (UID: \"031e200b-3cec-464e-83d6-036daf55f777\") " Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.968831 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8w25f" podStartSLOduration=3.968796964 podStartE2EDuration="3.968796964s" podCreationTimestamp="2026-01-22 16:23:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:39.962037133 +0000 UTC m=+1123.774206174" watchObservedRunningTime="2026-01-22 16:23:39.968796964 +0000 UTC m=+1123.780965995" Jan 22 16:23:39 crc kubenswrapper[5032]: I0122 16:23:39.995613 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "031e200b-3cec-464e-83d6-036daf55f777" (UID: "031e200b-3cec-464e-83d6-036daf55f777"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.003301 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp" (OuterVolumeSpecName: "kube-api-access-n4pdp") pod "031e200b-3cec-464e-83d6-036daf55f777" (UID: "031e200b-3cec-464e-83d6-036daf55f777"). InnerVolumeSpecName "kube-api-access-n4pdp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.015945 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config" (OuterVolumeSpecName: "config") pod "031e200b-3cec-464e-83d6-036daf55f777" (UID: "031e200b-3cec-464e-83d6-036daf55f777"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.044342 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "031e200b-3cec-464e-83d6-036daf55f777" (UID: "031e200b-3cec-464e-83d6-036daf55f777"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.045341 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "031e200b-3cec-464e-83d6-036daf55f777" (UID: "031e200b-3cec-464e-83d6-036daf55f777"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.052464 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.052497 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.052507 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.052515 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/031e200b-3cec-464e-83d6-036daf55f777-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.052524 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4pdp\" (UniqueName: \"kubernetes.io/projected/031e200b-3cec-464e-83d6-036daf55f777-kube-api-access-n4pdp\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.070282 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gf5jg" event={"ID":"8b048493-c081-48d3-a908-70bf0efeba64","Type":"ContainerStarted","Data":"0ff743aaa4eaee22f03fe812ea2d5b92cb2e3024f4648a0020790760250a0f33"} Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.070330 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gf5jg" event={"ID":"8b048493-c081-48d3-a908-70bf0efeba64","Type":"ContainerStarted","Data":"e0bf3a71c92568d74f6ee40ea62e080b842993a80f8410d9ea4ae9179c4daf43"} Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.106514 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-jb65g" 
event={"ID":"175d212f-92e2-468a-ac51-5c821d160842","Type":"ContainerStarted","Data":"fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26"} Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.124079 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-gf5jg" podStartSLOduration=3.124056634 podStartE2EDuration="3.124056634s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:40.109981638 +0000 UTC m=+1123.922150669" watchObservedRunningTime="2026-01-22 16:23:40.124056634 +0000 UTC m=+1123.936225665" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.158106 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.383525 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.407526 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.413922 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-26tvt"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.420903 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.428994 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:23:40 crc kubenswrapper[5032]: E0122 16:23:40.429474 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031e200b-3cec-464e-83d6-036daf55f777" containerName="init" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.429498 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="031e200b-3cec-464e-83d6-036daf55f777" containerName="init" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.429708 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="031e200b-3cec-464e-83d6-036daf55f777" containerName="init" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.430726 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.431882 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.437897 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.495699 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.495759 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.495893 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz2g6\" (UniqueName: \"kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.495922 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.495974 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.564933 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="031e200b-3cec-464e-83d6-036daf55f777" path="/var/lib/kubelet/pods/031e200b-3cec-464e-83d6-036daf55f777/volumes" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.589558 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.589594 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.590784 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.597039 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.597095 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.597190 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz2g6\" (UniqueName: \"kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.597211 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.597251 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.598924 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.601358 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.601447 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.616065 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.633018 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.636679 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.648022 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz2g6\" (UniqueName: \"kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6\") pod \"horizon-6dddcc7fbc-q5nc8\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.701917 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.701979 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.702049 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.702077 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fsrm\" (UniqueName: \"kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.702134 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.702150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.797262 5032 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804070 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804120 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804188 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804212 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fsrm\" (UniqueName: \"kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804263 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.804279 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.805299 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.805435 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.805520 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc 
kubenswrapper[5032]: I0122 16:23:40.806134 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.806428 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.833236 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fsrm\" (UniqueName: \"kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm\") pod \"dnsmasq-dns-56df8fb6b7-4859s\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.838514 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.907010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb\") pod \"7808724d-5ed5-4925-8345-80fb4c765340\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.907058 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config\") pod \"7808724d-5ed5-4925-8345-80fb4c765340\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.907614 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szcnn\" (UniqueName: \"kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn\") pod \"7808724d-5ed5-4925-8345-80fb4c765340\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.907826 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc\") pod \"7808724d-5ed5-4925-8345-80fb4c765340\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.907922 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb\") pod \"7808724d-5ed5-4925-8345-80fb4c765340\" (UID: \"7808724d-5ed5-4925-8345-80fb4c765340\") " Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.913659 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn" (OuterVolumeSpecName: "kube-api-access-szcnn") pod "7808724d-5ed5-4925-8345-80fb4c765340" (UID: "7808724d-5ed5-4925-8345-80fb4c765340"). InnerVolumeSpecName "kube-api-access-szcnn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.936469 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.944173 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7808724d-5ed5-4925-8345-80fb4c765340" (UID: "7808724d-5ed5-4925-8345-80fb4c765340"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.952935 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config" (OuterVolumeSpecName: "config") pod "7808724d-5ed5-4925-8345-80fb4c765340" (UID: "7808724d-5ed5-4925-8345-80fb4c765340"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.963548 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7808724d-5ed5-4925-8345-80fb4c765340" (UID: "7808724d-5ed5-4925-8345-80fb4c765340"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:40 crc kubenswrapper[5032]: I0122 16:23:40.970583 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7808724d-5ed5-4925-8345-80fb4c765340" (UID: "7808724d-5ed5-4925-8345-80fb4c765340"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.009950 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szcnn\" (UniqueName: \"kubernetes.io/projected/7808724d-5ed5-4925-8345-80fb4c765340-kube-api-access-szcnn\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.009977 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.009987 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.009996 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.010004 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7808724d-5ed5-4925-8345-80fb4c765340-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.152149 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" event={"ID":"7808724d-5ed5-4925-8345-80fb4c765340","Type":"ContainerDied","Data":"9ec7cd4634304736c0141870df111495e7242b68c96b878286ec02bf74126847"} Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.153643 5032 scope.go:117] "RemoveContainer" containerID="f0dea7c490ba8dd87e0d65647dfbd5fa5f0e9cad03c9793173afd61fb4475bce" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.152215 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5679f497-7d5gm" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.236983 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" event={"ID":"7f16880c-2d36-4873-b54b-fc7d51ca51cd","Type":"ContainerStarted","Data":"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684"} Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.237155 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="dnsmasq-dns" containerID="cri-o://0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684" gracePeriod=10 Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.237430 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.290136 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerStarted","Data":"8ee446ec39fba1cb4446fb31cc2ea9e7e21e7bd6b18cc1847dbcef5ab96d3fce"} Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.296149 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerStarted","Data":"9ffcc52d8df299059282d70433514eddcdbd96f7b599a187e7c5a52ce4cdf6f4"} Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.335470 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.355294 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d5679f497-7d5gm"] Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.362410 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" podStartSLOduration=4.36236915 podStartE2EDuration="4.36236915s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:41.302436897 +0000 UTC m=+1125.114605928" watchObservedRunningTime="2026-01-22 16:23:41.36236915 +0000 UTC m=+1125.174538181" Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.594118 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:23:41 crc kubenswrapper[5032]: I0122 16:23:41.724534 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:23:42 crc kubenswrapper[5032]: I0122 16:23:42.308929 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dddcc7fbc-q5nc8" event={"ID":"da70765e-3c15-45e5-af67-893b3910a90f","Type":"ContainerStarted","Data":"ac004aafa5a8fc5e05e268c01c222855646e7bf5a0d4c14e2cff95a36a7ec892"} Jan 22 16:23:42 crc kubenswrapper[5032]: I0122 16:23:42.313943 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" event={"ID":"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf","Type":"ContainerStarted","Data":"47917194ca243cb785b58e11385ef3a9dc43b4f7b11661e9657963e03c74de37"} Jan 22 16:23:42 crc kubenswrapper[5032]: I0122 16:23:42.471770 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7808724d-5ed5-4925-8345-80fb4c765340" path="/var/lib/kubelet/pods/7808724d-5ed5-4925-8345-80fb4c765340/volumes" Jan 22 16:23:42 crc kubenswrapper[5032]: I0122 16:23:42.962660 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.096991 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config\") pod \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.097207 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb\") pod \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.097233 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw2vw\" (UniqueName: \"kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw\") pod \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.097280 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb\") pod \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.097321 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc\") pod \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\" (UID: \"7f16880c-2d36-4873-b54b-fc7d51ca51cd\") " Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.121510 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw" (OuterVolumeSpecName: "kube-api-access-qw2vw") pod "7f16880c-2d36-4873-b54b-fc7d51ca51cd" (UID: "7f16880c-2d36-4873-b54b-fc7d51ca51cd"). InnerVolumeSpecName "kube-api-access-qw2vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.159573 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config" (OuterVolumeSpecName: "config") pod "7f16880c-2d36-4873-b54b-fc7d51ca51cd" (UID: "7f16880c-2d36-4873-b54b-fc7d51ca51cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.183306 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7f16880c-2d36-4873-b54b-fc7d51ca51cd" (UID: "7f16880c-2d36-4873-b54b-fc7d51ca51cd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.184213 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7f16880c-2d36-4873-b54b-fc7d51ca51cd" (UID: "7f16880c-2d36-4873-b54b-fc7d51ca51cd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.201281 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.201308 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw2vw\" (UniqueName: \"kubernetes.io/projected/7f16880c-2d36-4873-b54b-fc7d51ca51cd-kube-api-access-qw2vw\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.201321 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.201330 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.202920 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7f16880c-2d36-4873-b54b-fc7d51ca51cd" (UID: "7f16880c-2d36-4873-b54b-fc7d51ca51cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.302842 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f16880c-2d36-4873-b54b-fc7d51ca51cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.342560 5032 generic.go:334] "Generic (PLEG): container finished" podID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerID="6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a" exitCode=0 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.342646 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" event={"ID":"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf","Type":"ContainerDied","Data":"6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a"} Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.345103 5032 generic.go:334] "Generic (PLEG): container finished" podID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerID="0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684" exitCode=0 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.345164 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.345175 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" event={"ID":"7f16880c-2d36-4873-b54b-fc7d51ca51cd","Type":"ContainerDied","Data":"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684"} Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.345205 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-jcl6r" event={"ID":"7f16880c-2d36-4873-b54b-fc7d51ca51cd","Type":"ContainerDied","Data":"6a1612159bd4194212611d730964960ce32adfb7b0db099e1f35df91c929299b"} Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.345224 5032 scope.go:117] "RemoveContainer" containerID="0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.348692 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerStarted","Data":"5184f399ae3a7fdbcfa70ec330f8840a4311b3156dc968fa841d94da7640d443"} Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.348730 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-log" containerID="cri-o://8ee446ec39fba1cb4446fb31cc2ea9e7e21e7bd6b18cc1847dbcef5ab96d3fce" gracePeriod=30 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.348759 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-httpd" containerID="cri-o://5184f399ae3a7fdbcfa70ec330f8840a4311b3156dc968fa841d94da7640d443" gracePeriod=30 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.360116 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerStarted","Data":"959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2"} Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.360264 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-log" containerID="cri-o://9ffcc52d8df299059282d70433514eddcdbd96f7b599a187e7c5a52ce4cdf6f4" gracePeriod=30 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.360334 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-httpd" containerID="cri-o://959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2" gracePeriod=30 Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.393174 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.393155862 podStartE2EDuration="6.393155862s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:43.389788042 +0000 UTC m=+1127.201957073" watchObservedRunningTime="2026-01-22 16:23:43.393155862 +0000 UTC m=+1127.205324893" Jan 22 16:23:43 crc 
kubenswrapper[5032]: I0122 16:23:43.406117 5032 scope.go:117] "RemoveContainer" containerID="83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.429327 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.429308798 podStartE2EDuration="6.429308798s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:23:43.412019296 +0000 UTC m=+1127.224188337" watchObservedRunningTime="2026-01-22 16:23:43.429308798 +0000 UTC m=+1127.241477829" Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.440785 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:43 crc kubenswrapper[5032]: I0122 16:23:43.450950 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-jcl6r"] Jan 22 16:23:43 crc kubenswrapper[5032]: E0122 16:23:43.794447 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2bb020b_6e25_4841_91db_4bc1a4b21f82.slice/crio-conmon-959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.372981 5032 generic.go:334] "Generic (PLEG): container finished" podID="fa021065-770b-4137-aa62-be9093cccc34" containerID="8ee446ec39fba1cb4446fb31cc2ea9e7e21e7bd6b18cc1847dbcef5ab96d3fce" exitCode=143 Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.373275 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerDied","Data":"8ee446ec39fba1cb4446fb31cc2ea9e7e21e7bd6b18cc1847dbcef5ab96d3fce"} Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.375988 5032 generic.go:334] "Generic (PLEG): container finished" podID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerID="959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2" exitCode=0 Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.376009 5032 generic.go:334] "Generic (PLEG): container finished" podID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerID="9ffcc52d8df299059282d70433514eddcdbd96f7b599a187e7c5a52ce4cdf6f4" exitCode=143 Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.376046 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerDied","Data":"959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2"} Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.376062 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerDied","Data":"9ffcc52d8df299059282d70433514eddcdbd96f7b599a187e7c5a52ce4cdf6f4"} Jan 22 16:23:44 crc kubenswrapper[5032]: I0122 16:23:44.466593 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" path="/var/lib/kubelet/pods/7f16880c-2d36-4873-b54b-fc7d51ca51cd/volumes" Jan 22 16:23:45 crc kubenswrapper[5032]: I0122 16:23:45.420473 5032 generic.go:334] "Generic (PLEG): container 
finished" podID="fa021065-770b-4137-aa62-be9093cccc34" containerID="5184f399ae3a7fdbcfa70ec330f8840a4311b3156dc968fa841d94da7640d443" exitCode=0 Jan 22 16:23:45 crc kubenswrapper[5032]: I0122 16:23:45.420577 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerDied","Data":"5184f399ae3a7fdbcfa70ec330f8840a4311b3156dc968fa841d94da7640d443"} Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.816238 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.872242 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:23:46 crc kubenswrapper[5032]: E0122 16:23:46.872743 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="dnsmasq-dns" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.872766 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="dnsmasq-dns" Jan 22 16:23:46 crc kubenswrapper[5032]: E0122 16:23:46.872789 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="init" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.872799 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="init" Jan 22 16:23:46 crc kubenswrapper[5032]: E0122 16:23:46.872814 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7808724d-5ed5-4925-8345-80fb4c765340" containerName="init" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.872822 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7808724d-5ed5-4925-8345-80fb4c765340" containerName="init" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.873047 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f16880c-2d36-4873-b54b-fc7d51ca51cd" containerName="dnsmasq-dns" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.873075 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7808724d-5ed5-4925-8345-80fb4c765340" containerName="init" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.874121 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.876047 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.902139 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965199 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965250 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965429 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965610 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965679 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965734 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw7pp\" (UniqueName: \"kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:46 crc kubenswrapper[5032]: I0122 16:23:46.965765 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.001987 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c7b5bf75b-t5dp6"] Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.008357 5032 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.030198 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c7b5bf75b-t5dp6"] Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067754 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067812 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw7pp\" (UniqueName: \"kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067831 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067929 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-tls-certs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067965 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t4qc\" (UniqueName: \"kubernetes.io/projected/f9991301-f3df-4fe5-872f-dfba0842580e-kube-api-access-9t4qc\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.067997 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068020 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068046 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068068 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-secret-key\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068088 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9991301-f3df-4fe5-872f-dfba0842580e-logs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068111 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-scripts\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068151 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-config-data\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068169 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-combined-ca-bundle\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068196 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.068958 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.069221 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.073189 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.074728 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " 
pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.077956 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.078575 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.090909 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw7pp\" (UniqueName: \"kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp\") pod \"horizon-68bf69fb-rdlps\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171618 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-secret-key\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171657 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9991301-f3df-4fe5-872f-dfba0842580e-logs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171684 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-scripts\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171725 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-config-data\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171746 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-combined-ca-bundle\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171799 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-tls-certs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.171828 5032 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-9t4qc\" (UniqueName: \"kubernetes.io/projected/f9991301-f3df-4fe5-872f-dfba0842580e-kube-api-access-9t4qc\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.175427 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-secret-key\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.178828 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-horizon-tls-certs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.179941 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9991301-f3df-4fe5-872f-dfba0842580e-logs\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.180087 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-config-data\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.180701 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9991301-f3df-4fe5-872f-dfba0842580e-scripts\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.183790 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9991301-f3df-4fe5-872f-dfba0842580e-combined-ca-bundle\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.188246 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t4qc\" (UniqueName: \"kubernetes.io/projected/f9991301-f3df-4fe5-872f-dfba0842580e-kube-api-access-9t4qc\") pod \"horizon-6c7b5bf75b-t5dp6\" (UID: \"f9991301-f3df-4fe5-872f-dfba0842580e\") " pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.218155 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.341922 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.437788 5032 generic.go:334] "Generic (PLEG): container finished" podID="5e9e211d-267e-4d76-be04-75a83bf35c14" containerID="174bfa085d80eeecb78b6cadbd42931be570bb7c8eb00a34d30cabd4b1a658a9" exitCode=0 Jan 22 16:23:47 crc kubenswrapper[5032]: I0122 16:23:47.437889 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8w25f" event={"ID":"5e9e211d-267e-4d76-be04-75a83bf35c14","Type":"ContainerDied","Data":"174bfa085d80eeecb78b6cadbd42931be570bb7c8eb00a34d30cabd4b1a658a9"} Jan 22 16:23:55 crc kubenswrapper[5032]: E0122 16:23:55.105264 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Jan 22 16:23:55 crc kubenswrapper[5032]: E0122 16:23:55.106564 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fhgjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-ng56d_openstack(18e81df2-1e39-45a9-9ea9-c9b9a54b92f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:23:55 crc kubenswrapper[5032]: E0122 16:23:55.108457 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = 
copying config: context canceled\"" pod="openstack/placement-db-sync-ng56d" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" Jan 22 16:23:55 crc kubenswrapper[5032]: E0122 16:23:55.510043 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-ng56d" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.440772 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.441469 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55chf4h54fhfh5cch67bh568hbch597hc9hdchcfh675h98h597h57ch648h6h5b6h54h7h55bh5bfhb5h87h569h64ch85h5f5h698h5bfh9fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4fn6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6595764dd9-5wn45_openstack(da9d98b4-fc68-4ea3-adc0-8992f52076d2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.444249 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6595764dd9-5wn45" podUID="da9d98b4-fc68-4ea3-adc0-8992f52076d2" Jan 22 16:24:01 crc 
kubenswrapper[5032]: E0122 16:24:01.445239 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.445345 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n567h657h9h575hch6h68fhcbh64fh5cfh59fh8dhd4h5c4h64fh645h567h99h678hfchb5h587h68h696h67dh678h677hffh674hf6h5d7h668q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rz2g6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6dddcc7fbc-q5nc8_openstack(da70765e-3c15-45e5-af67-893b3910a90f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.447836 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6dddcc7fbc-q5nc8" podUID="da70765e-3c15-45e5-af67-893b3910a90f" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.488713 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.488917 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5bh6bh6h9h68dh58ch5b8h66dh577h665hdfh549h688h9bh65h554h657h658h588h99h647hc8h74h64fh54fh94hd6h5b7h595h8ch5d4hfq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kfwpd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-75d7ccfbb5-tsmmc_openstack(b761c2e8-3ed5-40ce-8ae8-eb181db52692): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:24:01 crc kubenswrapper[5032]: E0122 16:24:01.490842 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-75d7ccfbb5-tsmmc" podUID="b761c2e8-3ed5-40ce-8ae8-eb181db52692" Jan 22 16:24:03 crc kubenswrapper[5032]: I0122 16:24:03.016761 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:24:03 crc kubenswrapper[5032]: I0122 16:24:03.017276 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:24:08 crc kubenswrapper[5032]: I0122 16:24:08.337070 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:08 crc kubenswrapper[5032]: I0122 16:24:08.337513 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-internal-api-0" Jan 22 16:24:08 crc kubenswrapper[5032]: I0122 16:24:08.493683 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:24:08 crc kubenswrapper[5032]: I0122 16:24:08.495946 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.434125 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553163 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553265 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553302 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553335 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553523 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.553562 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89ftp\" (UniqueName: \"kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp\") pod \"5e9e211d-267e-4d76-be04-75a83bf35c14\" (UID: \"5e9e211d-267e-4d76-be04-75a83bf35c14\") " Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.566056 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts" (OuterVolumeSpecName: "scripts") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.568996 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.578063 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp" (OuterVolumeSpecName: "kube-api-access-89ftp") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "kube-api-access-89ftp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.585027 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.608360 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.622447 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data" (OuterVolumeSpecName: "config-data") pod "5e9e211d-267e-4d76-be04-75a83bf35c14" (UID: "5e9e211d-267e-4d76-be04-75a83bf35c14"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.637928 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8w25f" event={"ID":"5e9e211d-267e-4d76-be04-75a83bf35c14","Type":"ContainerDied","Data":"73c942a887999f5be7cd708725b26ea57fcb3becda72e6c977803bb164ea284f"} Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.637987 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73c942a887999f5be7cd708725b26ea57fcb3becda72e6c977803bb164ea284f" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.638241 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8w25f" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.655951 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.656267 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89ftp\" (UniqueName: \"kubernetes.io/projected/5e9e211d-267e-4d76-be04-75a83bf35c14-kube-api-access-89ftp\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.656279 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.656289 5032 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.656298 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:10 crc kubenswrapper[5032]: I0122 16:24:10.656308 5032 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e9e211d-267e-4d76-be04-75a83bf35c14-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.078904 5032 scope.go:117] "RemoveContainer" containerID="0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.079423 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684\": container with ID starting with 0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684 not found: ID does not exist" containerID="0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.079472 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684"} err="failed to get container status \"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684\": rpc error: code = NotFound desc = could not find container \"0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684\": container with ID starting with 0de06f89d4bafe9166fadd4b7aa22e90d2b28125aa7b9b09b49d99c4bc166684 not found: ID does not exist" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.079503 5032 scope.go:117] "RemoveContainer" containerID="83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.079900 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5\": container with ID starting with 83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5 not found: ID does not exist" containerID="83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5" Jan 22 16:24:11 
crc kubenswrapper[5032]: I0122 16:24:11.079944 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5"} err="failed to get container status \"83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5\": rpc error: code = NotFound desc = could not find container \"83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5\": container with ID starting with 83d5accae5649939ff69a16cb5ce4fcb3fd92a3b2fbbfc6b8e6416d6f66ecaf5 not found: ID does not exist" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.080528 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.080690 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jz2k5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-jb65g_openstack(175d212f-92e2-468a-ac51-5c821d160842): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.085166 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-jb65g" podUID="175d212f-92e2-468a-ac51-5c821d160842" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.143478 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.153262 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.161035 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.223312 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.236897 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285284 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285335 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key\") pod \"da70765e-3c15-45e5-af67-893b3910a90f\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285361 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data\") pod \"da70765e-3c15-45e5-af67-893b3910a90f\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285403 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285462 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285505 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key\") pod \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285524 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285554 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285575 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285598 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz2g6\" (UniqueName: \"kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6\") pod \"da70765e-3c15-45e5-af67-893b3910a90f\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285639 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts\") pod \"da70765e-3c15-45e5-af67-893b3910a90f\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285670 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs\") pod \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285697 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285724 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4fn6\" (UniqueName: \"kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6\") pod \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285752 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts\") pod \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285787 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data\") pod \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\" (UID: \"da9d98b4-fc68-4ea3-adc0-8992f52076d2\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285809 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285874 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54j2t\" (UniqueName: \"kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285897 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285927 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285958 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.285981 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs\") pod \"da70765e-3c15-45e5-af67-893b3910a90f\" (UID: \"da70765e-3c15-45e5-af67-893b3910a90f\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286006 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286039 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs\") pod \"fa021065-770b-4137-aa62-be9093cccc34\" (UID: \"fa021065-770b-4137-aa62-be9093cccc34\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286061 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286089 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf84z\" (UniqueName: \"kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z\") pod \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\" (UID: \"f2bb020b-6e25-4841-91db-4bc1a4b21f82\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286170 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs" (OuterVolumeSpecName: "logs") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286539 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.286618 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data" (OuterVolumeSpecName: "config-data") pod "da70765e-3c15-45e5-af67-893b3910a90f" (UID: "da70765e-3c15-45e5-af67-893b3910a90f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.287046 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.288206 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "da70765e-3c15-45e5-af67-893b3910a90f" (UID: "da70765e-3c15-45e5-af67-893b3910a90f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.288726 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.288965 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs" (OuterVolumeSpecName: "logs") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.289929 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.290236 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs" (OuterVolumeSpecName: "logs") pod "da70765e-3c15-45e5-af67-893b3910a90f" (UID: "da70765e-3c15-45e5-af67-893b3910a90f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.290750 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts" (OuterVolumeSpecName: "scripts") pod "da9d98b4-fc68-4ea3-adc0-8992f52076d2" (UID: "da9d98b4-fc68-4ea3-adc0-8992f52076d2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.292038 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z" (OuterVolumeSpecName: "kube-api-access-lf84z") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "kube-api-access-lf84z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.292824 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts" (OuterVolumeSpecName: "scripts") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.293020 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts" (OuterVolumeSpecName: "scripts") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.293829 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data" (OuterVolumeSpecName: "config-data") pod "da9d98b4-fc68-4ea3-adc0-8992f52076d2" (UID: "da9d98b4-fc68-4ea3-adc0-8992f52076d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.294283 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "da9d98b4-fc68-4ea3-adc0-8992f52076d2" (UID: "da9d98b4-fc68-4ea3-adc0-8992f52076d2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.294373 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts" (OuterVolumeSpecName: "scripts") pod "da70765e-3c15-45e5-af67-893b3910a90f" (UID: "da70765e-3c15-45e5-af67-893b3910a90f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.294944 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs" (OuterVolumeSpecName: "logs") pod "da9d98b4-fc68-4ea3-adc0-8992f52076d2" (UID: "da9d98b4-fc68-4ea3-adc0-8992f52076d2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.296313 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.297756 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6" (OuterVolumeSpecName: "kube-api-access-rz2g6") pod "da70765e-3c15-45e5-af67-893b3910a90f" (UID: "da70765e-3c15-45e5-af67-893b3910a90f"). InnerVolumeSpecName "kube-api-access-rz2g6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.298169 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t" (OuterVolumeSpecName: "kube-api-access-54j2t") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "kube-api-access-54j2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.298937 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6" (OuterVolumeSpecName: "kube-api-access-v4fn6") pod "da9d98b4-fc68-4ea3-adc0-8992f52076d2" (UID: "da9d98b4-fc68-4ea3-adc0-8992f52076d2"). InnerVolumeSpecName "kube-api-access-v4fn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.333196 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.348056 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.353459 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.357887 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). 
InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.363514 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data" (OuterVolumeSpecName: "config-data") pod "fa021065-770b-4137-aa62-be9093cccc34" (UID: "fa021065-770b-4137-aa62-be9093cccc34"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.368814 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data" (OuterVolumeSpecName: "config-data") pod "f2bb020b-6e25-4841-91db-4bc1a4b21f82" (UID: "f2bb020b-6e25-4841-91db-4bc1a4b21f82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387052 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs\") pod \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387137 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwpd\" (UniqueName: \"kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd\") pod \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387167 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts\") pod \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387242 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data\") pod \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387284 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key\") pod \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\" (UID: \"b761c2e8-3ed5-40ce-8ae8-eb181db52692\") " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387355 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs" (OuterVolumeSpecName: "logs") pod "b761c2e8-3ed5-40ce-8ae8-eb181db52692" (UID: "b761c2e8-3ed5-40ce-8ae8-eb181db52692"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387694 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54j2t\" (UniqueName: \"kubernetes.io/projected/fa021065-770b-4137-aa62-be9093cccc34-kube-api-access-54j2t\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387706 5032 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2bb020b-6e25-4841-91db-4bc1a4b21f82-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387715 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387726 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387734 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da70765e-3c15-45e5-af67-893b3910a90f-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387743 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387753 5032 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387761 5032 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387769 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf84z\" (UniqueName: \"kubernetes.io/projected/f2bb020b-6e25-4841-91db-4bc1a4b21f82-kube-api-access-lf84z\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387777 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b761c2e8-3ed5-40ce-8ae8-eb181db52692-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387802 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387814 5032 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da70765e-3c15-45e5-af67-893b3910a90f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387822 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387830 5032 
reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387838 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387846 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa021065-770b-4137-aa62-be9093cccc34-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387855 5032 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da9d98b4-fc68-4ea3-adc0-8992f52076d2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387879 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa021065-770b-4137-aa62-be9093cccc34-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387888 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz2g6\" (UniqueName: \"kubernetes.io/projected/da70765e-3c15-45e5-af67-893b3910a90f-kube-api-access-rz2g6\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387896 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da70765e-3c15-45e5-af67-893b3910a90f-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387904 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da9d98b4-fc68-4ea3-adc0-8992f52076d2-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387903 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data" (OuterVolumeSpecName: "config-data") pod "b761c2e8-3ed5-40ce-8ae8-eb181db52692" (UID: "b761c2e8-3ed5-40ce-8ae8-eb181db52692"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387923 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387936 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4fn6\" (UniqueName: \"kubernetes.io/projected/da9d98b4-fc68-4ea3-adc0-8992f52076d2-kube-api-access-v4fn6\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387947 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387960 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da9d98b4-fc68-4ea3-adc0-8992f52076d2-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.387970 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2bb020b-6e25-4841-91db-4bc1a4b21f82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.388648 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts" (OuterVolumeSpecName: "scripts") pod "b761c2e8-3ed5-40ce-8ae8-eb181db52692" (UID: "b761c2e8-3ed5-40ce-8ae8-eb181db52692"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.391533 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b761c2e8-3ed5-40ce-8ae8-eb181db52692" (UID: "b761c2e8-3ed5-40ce-8ae8-eb181db52692"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.392186 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd" (OuterVolumeSpecName: "kube-api-access-kfwpd") pod "b761c2e8-3ed5-40ce-8ae8-eb181db52692" (UID: "b761c2e8-3ed5-40ce-8ae8-eb181db52692"). InnerVolumeSpecName "kube-api-access-kfwpd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.406734 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.420907 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489021 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489049 5032 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b761c2e8-3ed5-40ce-8ae8-eb181db52692-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489058 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489069 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwpd\" (UniqueName: \"kubernetes.io/projected/b761c2e8-3ed5-40ce-8ae8-eb181db52692-kube-api-access-kfwpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489077 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.489085 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b761c2e8-3ed5-40ce-8ae8-eb181db52692-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.514587 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8w25f"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.523089 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8w25f"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.656303 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-mk9tw"] Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.657368 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e9e211d-267e-4d76-be04-75a83bf35c14" containerName="keystone-bootstrap" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657406 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e9e211d-267e-4d76-be04-75a83bf35c14" containerName="keystone-bootstrap" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.657426 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657436 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.657454 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-log" Jan 22 
16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657462 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-log" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.657486 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-log" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657495 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-log" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.657523 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657531 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657832 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657877 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa021065-770b-4137-aa62-be9093cccc34" containerName="glance-log" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657891 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-log" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657908 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e9e211d-267e-4d76-be04-75a83bf35c14" containerName="keystone-bootstrap" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.657927 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" containerName="glance-httpd" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.658637 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.660694 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.661335 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.661406 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wncsb" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.661831 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.661852 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.684560 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mk9tw"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.686747 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dddcc7fbc-q5nc8" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.687940 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dddcc7fbc-q5nc8" event={"ID":"da70765e-3c15-45e5-af67-893b3910a90f","Type":"ContainerDied","Data":"ac004aafa5a8fc5e05e268c01c222855646e7bf5a0d4c14e2cff95a36a7ec892"} Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.691439 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6595764dd9-5wn45" event={"ID":"da9d98b4-fc68-4ea3-adc0-8992f52076d2","Type":"ContainerDied","Data":"ff83683634985db7876b08f2040e2287322353dd1c56e8f172b8ec67aa83f763"} Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.691537 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6595764dd9-5wn45" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.694567 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75d7ccfbb5-tsmmc" event={"ID":"b761c2e8-3ed5-40ce-8ae8-eb181db52692","Type":"ContainerDied","Data":"43286fe0004b5b2a1c0853f3d233af9dd48775a99072c0183daf90e5c57858a3"} Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.694622 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75d7ccfbb5-tsmmc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.697427 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fa021065-770b-4137-aa62-be9093cccc34","Type":"ContainerDied","Data":"9ca27bcffe8fd25f80ca8f35b7683cfdad3b4c1c96ca40fcec6f598169be63aa"} Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.697482 5032 scope.go:117] "RemoveContainer" containerID="5184f399ae3a7fdbcfa70ec330f8840a4311b3156dc968fa841d94da7640d443" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.697619 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.705443 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2bb020b-6e25-4841-91db-4bc1a4b21f82","Type":"ContainerDied","Data":"f188b7031d49e5fbc925b926ff0219e7167e430153dee2febd85344dbae973c5"} Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.705498 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: E0122 16:24:11.709995 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-jb65g" podUID="175d212f-92e2-468a-ac51-5c821d160842" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.775425 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794225 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794532 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794628 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794746 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794834 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.794944 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4ls9\" (UniqueName: \"kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.802967 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.812060 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.813753 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.820243 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-l9wcj" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.820537 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.820694 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.823231 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.825028 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.847007 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.875118 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-75d7ccfbb5-tsmmc"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.892083 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896455 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzxwr\" (UniqueName: \"kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896497 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896543 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896596 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896637 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896659 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896687 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896733 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896759 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896790 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4ls9\" (UniqueName: \"kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896829 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896846 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896907 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.896936 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.899096 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6dddcc7fbc-q5nc8"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 
16:24:11.902716 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.903399 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.903887 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.904270 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.916290 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.917264 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4ls9\" (UniqueName: \"kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9\") pod \"keystone-bootstrap-mk9tw\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.932747 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.944769 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.956046 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.957405 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.958801 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.959993 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.966801 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.982184 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.984478 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.988652 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6595764dd9-5wn45"] Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.998300 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.998546 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.998652 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.998784 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.998937 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.999079 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.999353 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:11 crc kubenswrapper[5032]: I0122 16:24:11.999497 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000051 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000140 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:11.999973 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000313 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000412 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000511 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000597 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtmfb\" (UniqueName: \"kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000681 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000771 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzxwr\" (UniqueName: \"kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.000849 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.001021 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.003336 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.003921 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.004804 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.004984 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.020513 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzxwr\" (UniqueName: \"kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.030498 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.102961 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103043 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103087 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103123 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103166 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103214 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103258 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtmfb\" (UniqueName: \"kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.103282 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.104243 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.104705 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.104831 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.107645 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.110108 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.111100 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.115622 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.121799 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtmfb\" (UniqueName: \"kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.131466 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.137022 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.351951 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.470155 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e9e211d-267e-4d76-be04-75a83bf35c14" path="/var/lib/kubelet/pods/5e9e211d-267e-4d76-be04-75a83bf35c14/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.470875 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b761c2e8-3ed5-40ce-8ae8-eb181db52692" path="/var/lib/kubelet/pods/b761c2e8-3ed5-40ce-8ae8-eb181db52692/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.471304 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da70765e-3c15-45e5-af67-893b3910a90f" path="/var/lib/kubelet/pods/da70765e-3c15-45e5-af67-893b3910a90f/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.471692 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da9d98b4-fc68-4ea3-adc0-8992f52076d2" path="/var/lib/kubelet/pods/da9d98b4-fc68-4ea3-adc0-8992f52076d2/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.472641 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2bb020b-6e25-4841-91db-4bc1a4b21f82" path="/var/lib/kubelet/pods/f2bb020b-6e25-4841-91db-4bc1a4b21f82/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: I0122 16:24:12.473453 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa021065-770b-4137-aa62-be9093cccc34" path="/var/lib/kubelet/pods/fa021065-770b-4137-aa62-be9093cccc34/volumes" Jan 22 16:24:12 crc kubenswrapper[5032]: E0122 16:24:12.917564 5032 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 22 16:24:12 crc kubenswrapper[5032]: E0122 16:24:12.917766 5032 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mrjv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-dxmk2_openstack(857e311d-5f3f-4c81-9f86-6d0a3c9e0307): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 16:24:12 crc kubenswrapper[5032]: E0122 16:24:12.919475 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-dxmk2" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.022050 5032 scope.go:117] "RemoveContainer" containerID="8ee446ec39fba1cb4446fb31cc2ea9e7e21e7bd6b18cc1847dbcef5ab96d3fce" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.136470 5032 scope.go:117] "RemoveContainer" containerID="959167834af5179abba8fc4dfae7de1538d3f2a85e2750f12878e0561429e0a2" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.187735 5032 scope.go:117] "RemoveContainer" containerID="9ffcc52d8df299059282d70433514eddcdbd96f7b599a187e7c5a52ce4cdf6f4" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.487475 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c7b5bf75b-t5dp6"] Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.764145 5032 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/placement-db-sync-ng56d" event={"ID":"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7","Type":"ContainerStarted","Data":"138bf192d4d9cd4c98df6e324c87216f0d2deb4d054507fb90afd38369a65db0"} Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.767433 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" event={"ID":"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf","Type":"ContainerStarted","Data":"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d"} Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.768123 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.769394 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c7b5bf75b-t5dp6" event={"ID":"f9991301-f3df-4fe5-872f-dfba0842580e","Type":"ContainerStarted","Data":"a1eaffcf5004da7d80d80d7c8c0ccf7b72df023ac75e4e215cc0f7251edfce93"} Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.770994 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerStarted","Data":"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3"} Jan 22 16:24:13 crc kubenswrapper[5032]: E0122 16:24:13.772756 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-dxmk2" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.791958 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ng56d" podStartSLOduration=2.902914253 podStartE2EDuration="36.791938301s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="2026-01-22 16:23:39.146889811 +0000 UTC m=+1122.959058842" lastFinishedPulling="2026-01-22 16:24:13.035913869 +0000 UTC m=+1156.848082890" observedRunningTime="2026-01-22 16:24:13.787347848 +0000 UTC m=+1157.599516909" watchObservedRunningTime="2026-01-22 16:24:13.791938301 +0000 UTC m=+1157.604107332" Jan 22 16:24:13 crc kubenswrapper[5032]: I0122 16:24:13.819272 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" podStartSLOduration=33.81923262 podStartE2EDuration="33.81923262s" podCreationTimestamp="2026-01-22 16:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:13.808400781 +0000 UTC m=+1157.620569832" watchObservedRunningTime="2026-01-22 16:24:13.81923262 +0000 UTC m=+1157.631401661" Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.137573 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.246429 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mk9tw"] Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.335751 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:24:14 crc kubenswrapper[5032]: W0122 16:24:14.351996 5032 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf750cd9d_1983_4ff8_a510_fc773b3fcfcf.slice/crio-0401bf6ac9ba45d5923a2cca342c7ed3c14b9913d9aee1c8ba142f8d43ab8915 WatchSource:0}: Error finding container 0401bf6ac9ba45d5923a2cca342c7ed3c14b9913d9aee1c8ba142f8d43ab8915: Status 404 returned error can't find the container with id 0401bf6ac9ba45d5923a2cca342c7ed3c14b9913d9aee1c8ba142f8d43ab8915 Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.785495 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerStarted","Data":"0401bf6ac9ba45d5923a2cca342c7ed3c14b9913d9aee1c8ba142f8d43ab8915"} Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.786497 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerStarted","Data":"e5613f8edd73a8e2de1177bd8f475dd6a766cddd6141b3960c72ce0e0144ddc6"} Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.788266 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mk9tw" event={"ID":"6edd56b3-2854-4b22-8d59-b620200d7cfe","Type":"ContainerStarted","Data":"0185960f97620fac9d4721bdb8a6837e4405402ad3a81960afc937ec6f06b8cb"} Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.788308 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mk9tw" event={"ID":"6edd56b3-2854-4b22-8d59-b620200d7cfe","Type":"ContainerStarted","Data":"1f25c7a52fd7bfb4957cd256d81397be8dd73bfa61f8fb1f3208e6997c187c17"} Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.793967 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c7b5bf75b-t5dp6" event={"ID":"f9991301-f3df-4fe5-872f-dfba0842580e","Type":"ContainerStarted","Data":"8a9ce913c8c012ab827ae48539d20ef16b752912bf456aea88e98ef9958f3f91"} Jan 22 16:24:14 crc kubenswrapper[5032]: I0122 16:24:14.807313 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-mk9tw" podStartSLOduration=3.807295116 podStartE2EDuration="3.807295116s" podCreationTimestamp="2026-01-22 16:24:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:14.805794706 +0000 UTC m=+1158.617963757" watchObservedRunningTime="2026-01-22 16:24:14.807295116 +0000 UTC m=+1158.619464147" Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.254738 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.802612 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerStarted","Data":"08835a8819cdd5bd9e77b70a2c90745b94e2c11a682b53f04742c7e61e48e584"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.807473 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c7b5bf75b-t5dp6" event={"ID":"f9991301-f3df-4fe5-872f-dfba0842580e","Type":"ContainerStarted","Data":"bff95d24c62055101c418fbf35e4ba5504044cbc987e9ad3515d070a235278f0"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.814601 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerStarted","Data":"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.816711 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerStarted","Data":"c19e7c4f249df844077ca639ddf4702d340f082798870e59ea1c0a5891e3d050"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.816746 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerStarted","Data":"1f79f0f2126156b0ebaf59b79d17e3c6eec85d2cf121f7c2ba7096f5f7293ffe"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.818275 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerStarted","Data":"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df"} Jan 22 16:24:15 crc kubenswrapper[5032]: I0122 16:24:15.834111 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6c7b5bf75b-t5dp6" podStartSLOduration=29.006967173 podStartE2EDuration="29.834094596s" podCreationTimestamp="2026-01-22 16:23:46 +0000 UTC" firstStartedPulling="2026-01-22 16:24:13.497793967 +0000 UTC m=+1157.309962998" lastFinishedPulling="2026-01-22 16:24:14.32492139 +0000 UTC m=+1158.137090421" observedRunningTime="2026-01-22 16:24:15.827493999 +0000 UTC m=+1159.639663050" watchObservedRunningTime="2026-01-22 16:24:15.834094596 +0000 UTC m=+1159.646263627" Jan 22 16:24:16 crc kubenswrapper[5032]: I0122 16:24:16.834154 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerStarted","Data":"9b3a151aa981201420cd9a43bd8ee927ffe100bfaf449bb58f6c6d31dcfe5d1e"} Jan 22 16:24:16 crc kubenswrapper[5032]: I0122 16:24:16.841028 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerStarted","Data":"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8"} Jan 22 16:24:16 crc kubenswrapper[5032]: I0122 16:24:16.872032 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.872005265 podStartE2EDuration="5.872005265s" podCreationTimestamp="2026-01-22 16:24:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:16.860542529 +0000 UTC m=+1160.672711580" watchObservedRunningTime="2026-01-22 16:24:16.872005265 +0000 UTC m=+1160.684174296" Jan 22 16:24:16 crc kubenswrapper[5032]: I0122 16:24:16.892126 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68bf69fb-rdlps" podStartSLOduration=29.939641779 podStartE2EDuration="30.892085162s" podCreationTimestamp="2026-01-22 16:23:46 +0000 UTC" firstStartedPulling="2026-01-22 16:24:14.152246423 +0000 UTC m=+1157.964415454" lastFinishedPulling="2026-01-22 16:24:15.104689806 +0000 UTC m=+1158.916858837" observedRunningTime="2026-01-22 16:24:16.884673674 +0000 UTC m=+1160.696842715" watchObservedRunningTime="2026-01-22 16:24:16.892085162 +0000 UTC m=+1160.704254193" Jan 22 16:24:17 crc 
kubenswrapper[5032]: I0122 16:24:17.222931 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:24:17 crc kubenswrapper[5032]: I0122 16:24:17.223057 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:24:17 crc kubenswrapper[5032]: I0122 16:24:17.342832 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:24:17 crc kubenswrapper[5032]: I0122 16:24:17.342897 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:24:17 crc kubenswrapper[5032]: I0122 16:24:17.850275 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerStarted","Data":"b6c7424f47d322a90c1d471d03c82a562892675461e2a2cf65891b84df5835d6"} Jan 22 16:24:17 crc kubenswrapper[5032]: I0122 16:24:17.885621 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.885600794 podStartE2EDuration="6.885600794s" podCreationTimestamp="2026-01-22 16:24:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:17.877311022 +0000 UTC m=+1161.689480053" watchObservedRunningTime="2026-01-22 16:24:17.885600794 +0000 UTC m=+1161.697769825" Jan 22 16:24:20 crc kubenswrapper[5032]: I0122 16:24:20.938853 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:24:21 crc kubenswrapper[5032]: I0122 16:24:21.002543 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:24:21 crc kubenswrapper[5032]: I0122 16:24:21.002846 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="dnsmasq-dns" containerID="cri-o://39029843a03a5b460a9849cea93e4b9551eddd90c34385524acbae60411c7bdf" gracePeriod=10 Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.137875 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.138022 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.176408 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.204267 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.352997 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.353208 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.387043 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.401826 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.830945 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.905126 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.905189 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.905200 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:24:22 crc kubenswrapper[5032]: I0122 16:24:22.905211 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:24:23 crc kubenswrapper[5032]: I0122 16:24:23.915645 5032 generic.go:334] "Generic (PLEG): container finished" podID="6edd56b3-2854-4b22-8d59-b620200d7cfe" containerID="0185960f97620fac9d4721bdb8a6837e4405402ad3a81960afc937ec6f06b8cb" exitCode=0 Jan 22 16:24:23 crc kubenswrapper[5032]: I0122 16:24:23.915769 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mk9tw" event={"ID":"6edd56b3-2854-4b22-8d59-b620200d7cfe","Type":"ContainerDied","Data":"0185960f97620fac9d4721bdb8a6837e4405402ad3a81960afc937ec6f06b8cb"} Jan 22 16:24:23 crc kubenswrapper[5032]: I0122 16:24:23.919313 5032 generic.go:334] "Generic (PLEG): container finished" podID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerID="39029843a03a5b460a9849cea93e4b9551eddd90c34385524acbae60411c7bdf" exitCode=0 Jan 22 16:24:23 crc kubenswrapper[5032]: I0122 16:24:23.919607 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" event={"ID":"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65","Type":"ContainerDied","Data":"39029843a03a5b460a9849cea93e4b9551eddd90c34385524acbae60411c7bdf"} Jan 22 16:24:24 crc kubenswrapper[5032]: I0122 16:24:24.926959 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:24:24 crc kubenswrapper[5032]: I0122 16:24:24.927283 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.023712 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.023803 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.069334 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.069802 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.069905 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" 
Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.168555 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.223764 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc\") pod \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.224063 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb\") pod \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.224106 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnpnt\" (UniqueName: \"kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt\") pod \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.224127 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb\") pod \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.224772 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config\") pod \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\" (UID: \"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.244635 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt" (OuterVolumeSpecName: "kube-api-access-gnpnt") pod "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" (UID: "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65"). InnerVolumeSpecName "kube-api-access-gnpnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.327442 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnpnt\" (UniqueName: \"kubernetes.io/projected/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-kube-api-access-gnpnt\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.357282 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.363599 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" (UID: "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.390641 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" (UID: "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.392338 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config" (OuterVolumeSpecName: "config") pod "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" (UID: "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428180 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" (UID: "7a8c7d09-2d14-4839-afd1-2c66b1a4ef65"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428287 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428331 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428384 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428476 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428514 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4ls9\" (UniqueName: \"kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.428528 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle\") pod \"6edd56b3-2854-4b22-8d59-b620200d7cfe\" (UID: \"6edd56b3-2854-4b22-8d59-b620200d7cfe\") " Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.430980 5032 reconciler_common.go:293] "Volume detached 
for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.431302 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.431390 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.431457 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.432616 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts" (OuterVolumeSpecName: "scripts") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.441041 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.441108 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.441322 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9" (OuterVolumeSpecName: "kube-api-access-z4ls9") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "kube-api-access-z4ls9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.452314 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data" (OuterVolumeSpecName: "config-data") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.462900 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6edd56b3-2854-4b22-8d59-b620200d7cfe" (UID: "6edd56b3-2854-4b22-8d59-b620200d7cfe"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533384 5032 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533423 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4ls9\" (UniqueName: \"kubernetes.io/projected/6edd56b3-2854-4b22-8d59-b620200d7cfe-kube-api-access-z4ls9\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533438 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533451 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533463 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.533474 5032 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6edd56b3-2854-4b22-8d59-b620200d7cfe-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.966097 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" event={"ID":"7a8c7d09-2d14-4839-afd1-2c66b1a4ef65","Type":"ContainerDied","Data":"8490cc383963c20e88655d2048c1601642fc98f9c2dabdb707161f951cc74e39"} Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.966156 5032 scope.go:117] "RemoveContainer" containerID="39029843a03a5b460a9849cea93e4b9551eddd90c34385524acbae60411c7bdf" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.966316 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-sn4fm" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.993417 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerStarted","Data":"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e"} Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.996178 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-mk9tw" Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.996526 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mk9tw" event={"ID":"6edd56b3-2854-4b22-8d59-b620200d7cfe","Type":"ContainerDied","Data":"1f25c7a52fd7bfb4957cd256d81397be8dd73bfa61f8fb1f3208e6997c187c17"} Jan 22 16:24:25 crc kubenswrapper[5032]: I0122 16:24:25.996553 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f25c7a52fd7bfb4957cd256d81397be8dd73bfa61f8fb1f3208e6997c187c17" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.020735 5032 scope.go:117] "RemoveContainer" containerID="0f128633b7c55bb85c4cda23560cd0b5bb37e5fdedb65e7f9963623ce64a672f" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.049582 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.062314 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-sn4fm"] Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.068949 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-69959c7d95-wj9bh"] Jan 22 16:24:26 crc kubenswrapper[5032]: E0122 16:24:26.069318 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="dnsmasq-dns" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.069338 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="dnsmasq-dns" Jan 22 16:24:26 crc kubenswrapper[5032]: E0122 16:24:26.069351 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6edd56b3-2854-4b22-8d59-b620200d7cfe" containerName="keystone-bootstrap" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.069358 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6edd56b3-2854-4b22-8d59-b620200d7cfe" containerName="keystone-bootstrap" Jan 22 16:24:26 crc kubenswrapper[5032]: E0122 16:24:26.069389 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="init" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.069396 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="init" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.069539 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" containerName="dnsmasq-dns" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.069564 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6edd56b3-2854-4b22-8d59-b620200d7cfe" containerName="keystone-bootstrap" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.070130 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.073362 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wncsb" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.073551 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.073658 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.074186 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.074304 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.074306 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.077496 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-69959c7d95-wj9bh"] Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144026 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-combined-ca-bundle\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144111 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-credential-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144167 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-config-data\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144190 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wffdz\" (UniqueName: \"kubernetes.io/projected/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-kube-api-access-wffdz\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144216 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-public-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144247 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-fernet-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: 
\"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144303 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-scripts\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.144329 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-internal-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.245816 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-config-data\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.247088 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wffdz\" (UniqueName: \"kubernetes.io/projected/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-kube-api-access-wffdz\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.247507 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-public-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.247952 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-fernet-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.248183 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-scripts\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.248277 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-internal-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.248419 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-combined-ca-bundle\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 
22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.248529 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-credential-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.255823 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-fernet-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.258387 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-scripts\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.258673 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-combined-ca-bundle\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.258737 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-public-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.259049 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-config-data\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.260066 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-internal-tls-certs\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.261572 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-credential-keys\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.264192 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wffdz\" (UniqueName: \"kubernetes.io/projected/bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0-kube-api-access-wffdz\") pod \"keystone-69959c7d95-wj9bh\" (UID: \"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0\") " pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.395136 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.469438 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a8c7d09-2d14-4839-afd1-2c66b1a4ef65" path="/var/lib/kubelet/pods/7a8c7d09-2d14-4839-afd1-2c66b1a4ef65/volumes" Jan 22 16:24:26 crc kubenswrapper[5032]: I0122 16:24:26.855418 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-69959c7d95-wj9bh"] Jan 22 16:24:27 crc kubenswrapper[5032]: I0122 16:24:27.006989 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-69959c7d95-wj9bh" event={"ID":"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0","Type":"ContainerStarted","Data":"3474252828ccdfbd57beee1bbdad2f8e9a431bf741ca8c2fd99496b28d20137c"} Jan 22 16:24:27 crc kubenswrapper[5032]: I0122 16:24:27.221057 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 22 16:24:27 crc kubenswrapper[5032]: I0122 16:24:27.343821 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6c7b5bf75b-t5dp6" podUID="f9991301-f3df-4fe5-872f-dfba0842580e" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Jan 22 16:24:28 crc kubenswrapper[5032]: I0122 16:24:28.039021 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-69959c7d95-wj9bh" event={"ID":"bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0","Type":"ContainerStarted","Data":"f7b1866bc23b7a1cb126156d63ffeb0734afad0dd17fa9d8b76ac1572c280ccb"} Jan 22 16:24:28 crc kubenswrapper[5032]: I0122 16:24:28.039842 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:28 crc kubenswrapper[5032]: I0122 16:24:28.071053 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-69959c7d95-wj9bh" podStartSLOduration=2.071025273 podStartE2EDuration="2.071025273s" podCreationTimestamp="2026-01-22 16:24:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:28.063339486 +0000 UTC m=+1171.875508537" watchObservedRunningTime="2026-01-22 16:24:28.071025273 +0000 UTC m=+1171.883194344" Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 16:24:29.052781 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-jb65g" event={"ID":"175d212f-92e2-468a-ac51-5c821d160842","Type":"ContainerStarted","Data":"993447f33f973d6e3fee0c7551a69d446c00f519c48b60bfa88f5947350f5fef"} Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 16:24:29.055881 5032 generic.go:334] "Generic (PLEG): container finished" podID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" containerID="138bf192d4d9cd4c98df6e324c87216f0d2deb4d054507fb90afd38369a65db0" exitCode=0 Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 16:24:29.055970 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ng56d" event={"ID":"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7","Type":"ContainerDied","Data":"138bf192d4d9cd4c98df6e324c87216f0d2deb4d054507fb90afd38369a65db0"} Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 
16:24:29.058698 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dxmk2" event={"ID":"857e311d-5f3f-4c81-9f86-6d0a3c9e0307","Type":"ContainerStarted","Data":"27e06268d6656feccfcd5f2b92f2c1d29cc816b29fe34dce87415dfe46f214d7"} Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 16:24:29.076017 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-jb65g" podStartSLOduration=3.237892778 podStartE2EDuration="52.076001175s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="2026-01-22 16:23:39.337555388 +0000 UTC m=+1123.149724419" lastFinishedPulling="2026-01-22 16:24:28.175663785 +0000 UTC m=+1171.987832816" observedRunningTime="2026-01-22 16:24:29.072592893 +0000 UTC m=+1172.884761924" watchObservedRunningTime="2026-01-22 16:24:29.076001175 +0000 UTC m=+1172.888170206" Jan 22 16:24:29 crc kubenswrapper[5032]: I0122 16:24:29.107951 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-dxmk2" podStartSLOduration=3.05700145 podStartE2EDuration="52.107842984s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="2026-01-22 16:23:39.123231758 +0000 UTC m=+1122.935400789" lastFinishedPulling="2026-01-22 16:24:28.174073292 +0000 UTC m=+1171.986242323" observedRunningTime="2026-01-22 16:24:29.096793986 +0000 UTC m=+1172.908963007" watchObservedRunningTime="2026-01-22 16:24:29.107842984 +0000 UTC m=+1172.920012035" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.712497 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ng56d" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.858436 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data\") pod \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhgjs\" (UniqueName: \"kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs\") pod \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859100 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs\") pod \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859171 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts\") pod \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859244 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle\") pod \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\" (UID: \"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7\") " Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859660 5032 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs" (OuterVolumeSpecName: "logs") pod "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" (UID: "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.859814 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.867431 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts" (OuterVolumeSpecName: "scripts") pod "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" (UID: "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.867995 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs" (OuterVolumeSpecName: "kube-api-access-fhgjs") pod "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" (UID: "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7"). InnerVolumeSpecName "kube-api-access-fhgjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.896208 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" (UID: "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.900575 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data" (OuterVolumeSpecName: "config-data") pod "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" (UID: "18e81df2-1e39-45a9-9ea9-c9b9a54b92f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.961362 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.961402 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhgjs\" (UniqueName: \"kubernetes.io/projected/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-kube-api-access-fhgjs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.961417 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:30 crc kubenswrapper[5032]: I0122 16:24:30.961428 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.075628 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ng56d" event={"ID":"18e81df2-1e39-45a9-9ea9-c9b9a54b92f7","Type":"ContainerDied","Data":"1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68"} Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.075664 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c298fe26cdc0b85d5e2da889ad4f560268af5e8cbf23d5fecea550693f6ff68" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.075687 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ng56d" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.269649 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-697d8b94d8-5r62k"] Jan 22 16:24:31 crc kubenswrapper[5032]: E0122 16:24:31.270566 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" containerName="placement-db-sync" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.270658 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" containerName="placement-db-sync" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.270961 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" containerName="placement-db-sync" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.272212 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.277051 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.277580 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.277698 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xfthf" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.277835 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.277080 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.288015 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-697d8b94d8-5r62k"] Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.370848 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-public-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.370944 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-logs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.370968 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-config-data\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.371005 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-combined-ca-bundle\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.371026 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s8bn\" (UniqueName: \"kubernetes.io/projected/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-kube-api-access-9s8bn\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.371165 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-scripts\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.371206 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-internal-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472506 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-public-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472579 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-logs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472598 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-config-data\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472621 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-combined-ca-bundle\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472637 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s8bn\" (UniqueName: \"kubernetes.io/projected/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-kube-api-access-9s8bn\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472670 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-scripts\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.472686 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-internal-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.473094 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-logs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.480246 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-internal-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.480350 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-public-tls-certs\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.483055 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-scripts\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.483930 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-config-data\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.494341 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-combined-ca-bundle\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.494956 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s8bn\" (UniqueName: \"kubernetes.io/projected/ac44c87a-d256-43f2-9dcb-b7b398ff8b6b-kube-api-access-9s8bn\") pod \"placement-697d8b94d8-5r62k\" (UID: \"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b\") " pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:31 crc kubenswrapper[5032]: I0122 16:24:31.640819 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:33 crc kubenswrapper[5032]: I0122 16:24:33.017047 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:24:33 crc kubenswrapper[5032]: I0122 16:24:33.017199 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:24:33 crc kubenswrapper[5032]: I0122 16:24:33.017276 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:24:33 crc kubenswrapper[5032]: I0122 16:24:33.018241 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:24:33 crc kubenswrapper[5032]: I0122 16:24:33.018353 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871" gracePeriod=600 Jan 22 16:24:34 crc kubenswrapper[5032]: I0122 16:24:34.100610 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871" exitCode=0 Jan 22 16:24:34 crc kubenswrapper[5032]: I0122 16:24:34.100834 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871"} Jan 22 16:24:34 crc kubenswrapper[5032]: I0122 16:24:34.100992 5032 scope.go:117] "RemoveContainer" containerID="7c99ec9e6db96c22555ef66c4f25515b95be204438e7cfc23aad474c49a63d5f" Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.150291 5032 generic.go:334] "Generic (PLEG): container finished" podID="8b048493-c081-48d3-a908-70bf0efeba64" containerID="0ff743aaa4eaee22f03fe812ea2d5b92cb2e3024f4648a0020790760250a0f33" exitCode=0 Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.150825 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gf5jg" event={"ID":"8b048493-c081-48d3-a908-70bf0efeba64","Type":"ContainerDied","Data":"0ff743aaa4eaee22f03fe812ea2d5b92cb2e3024f4648a0020790760250a0f33"} Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154390 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerStarted","Data":"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1"} Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154581 5032 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-central-agent" containerID="cri-o://162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" gracePeriod=30 Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154618 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154676 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-notification-agent" containerID="cri-o://832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" gracePeriod=30 Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154686 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="sg-core" containerID="cri-o://ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" gracePeriod=30 Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.154778 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="proxy-httpd" containerID="cri-o://ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" gracePeriod=30 Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.163311 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1"} Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.244060 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.523810668 podStartE2EDuration="1m1.244034183s" podCreationTimestamp="2026-01-22 16:23:37 +0000 UTC" firstStartedPulling="2026-01-22 16:23:39.009768595 +0000 UTC m=+1122.821937626" lastFinishedPulling="2026-01-22 16:24:37.72999211 +0000 UTC m=+1181.542161141" observedRunningTime="2026-01-22 16:24:38.21129154 +0000 UTC m=+1182.023460581" watchObservedRunningTime="2026-01-22 16:24:38.244034183 +0000 UTC m=+1182.056203224" Jan 22 16:24:38 crc kubenswrapper[5032]: I0122 16:24:38.244748 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-697d8b94d8-5r62k"] Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.190138 5032 generic.go:334] "Generic (PLEG): container finished" podID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" exitCode=2 Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.190254 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerDied","Data":"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e"} Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.197219 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-697d8b94d8-5r62k" event={"ID":"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b","Type":"ContainerStarted","Data":"157bae84f3a71851b14ca4208b7709782bc9dd1595a8c06b15e85405f47ecdfd"} Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.224188 5032 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.253257 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.561092 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.624395 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config\") pod \"8b048493-c081-48d3-a908-70bf0efeba64\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.624540 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle\") pod \"8b048493-c081-48d3-a908-70bf0efeba64\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.624569 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzmc2\" (UniqueName: \"kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2\") pod \"8b048493-c081-48d3-a908-70bf0efeba64\" (UID: \"8b048493-c081-48d3-a908-70bf0efeba64\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.631182 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2" (OuterVolumeSpecName: "kube-api-access-kzmc2") pod "8b048493-c081-48d3-a908-70bf0efeba64" (UID: "8b048493-c081-48d3-a908-70bf0efeba64"). InnerVolumeSpecName "kube-api-access-kzmc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.650064 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config" (OuterVolumeSpecName: "config") pod "8b048493-c081-48d3-a908-70bf0efeba64" (UID: "8b048493-c081-48d3-a908-70bf0efeba64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.652133 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b048493-c081-48d3-a908-70bf0efeba64" (UID: "8b048493-c081-48d3-a908-70bf0efeba64"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.726948 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.726987 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b048493-c081-48d3-a908-70bf0efeba64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.726998 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzmc2\" (UniqueName: \"kubernetes.io/projected/8b048493-c081-48d3-a908-70bf0efeba64-kube-api-access-kzmc2\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.900347 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930105 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930193 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930262 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930331 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqrr7\" (UniqueName: \"kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930465 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930500 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930548 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts\") pod \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\" (UID: \"dcdacfde-30ea-4db9-af16-525aac6f2d5f\") " Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.930999 5032 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.931097 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.934165 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts" (OuterVolumeSpecName: "scripts") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.934270 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7" (OuterVolumeSpecName: "kube-api-access-zqrr7") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "kube-api-access-zqrr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:39 crc kubenswrapper[5032]: I0122 16:24:39.975785 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.007295 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.023538 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data" (OuterVolumeSpecName: "config-data") pod "dcdacfde-30ea-4db9-af16-525aac6f2d5f" (UID: "dcdacfde-30ea-4db9-af16-525aac6f2d5f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032706 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032740 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032754 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032767 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqrr7\" (UniqueName: \"kubernetes.io/projected/dcdacfde-30ea-4db9-af16-525aac6f2d5f-kube-api-access-zqrr7\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032782 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032793 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcdacfde-30ea-4db9-af16-525aac6f2d5f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.032805 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcdacfde-30ea-4db9-af16-525aac6f2d5f-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.213964 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-697d8b94d8-5r62k" event={"ID":"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b","Type":"ContainerStarted","Data":"9121462472ac914d033820bd3048d09e425036b38667356da349fa6de8e2be77"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.214014 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-697d8b94d8-5r62k" event={"ID":"ac44c87a-d256-43f2-9dcb-b7b398ff8b6b","Type":"ContainerStarted","Data":"9a1e059ffb8bbc17733b974d50c5e589dd848291d194e4212a1b2daf225ef6f1"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.214080 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.214108 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.219225 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gf5jg" event={"ID":"8b048493-c081-48d3-a908-70bf0efeba64","Type":"ContainerDied","Data":"e0bf3a71c92568d74f6ee40ea62e080b842993a80f8410d9ea4ae9179c4daf43"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.219355 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-gf5jg" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.220038 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0bf3a71c92568d74f6ee40ea62e080b842993a80f8410d9ea4ae9179c4daf43" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229048 5032 generic.go:334] "Generic (PLEG): container finished" podID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" exitCode=0 Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229077 5032 generic.go:334] "Generic (PLEG): container finished" podID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" exitCode=0 Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229106 5032 generic.go:334] "Generic (PLEG): container finished" podID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" exitCode=0 Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229116 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229131 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerDied","Data":"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229170 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerDied","Data":"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229186 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerDied","Data":"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229200 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcdacfde-30ea-4db9-af16-525aac6f2d5f","Type":"ContainerDied","Data":"956b62437028662d752db4b119048ed8a3512baa180a33b3b62fcacafa44becd"} Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.229221 5032 scope.go:117] "RemoveContainer" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.260782 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-697d8b94d8-5r62k" podStartSLOduration=9.260765049 podStartE2EDuration="9.260765049s" podCreationTimestamp="2026-01-22 16:24:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:40.257133361 +0000 UTC m=+1184.069302412" watchObservedRunningTime="2026-01-22 16:24:40.260765049 +0000 UTC m=+1184.072934090" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.269864 5032 scope.go:117] "RemoveContainer" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.286290 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.298187 5032 
scope.go:117] "RemoveContainer" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.308440 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.327755 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.328189 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="sg-core" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328218 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="sg-core" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.328248 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="proxy-httpd" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328260 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="proxy-httpd" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.328279 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-notification-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328288 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-notification-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.328301 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-central-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328308 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-central-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.328331 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b048493-c081-48d3-a908-70bf0efeba64" containerName="neutron-db-sync" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328339 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b048493-c081-48d3-a908-70bf0efeba64" containerName="neutron-db-sync" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328528 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-central-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328586 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b048493-c081-48d3-a908-70bf0efeba64" containerName="neutron-db-sync" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328606 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="ceilometer-notification-agent" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328628 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="proxy-httpd" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.328648 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" containerName="sg-core" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.330779 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.337000 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.337181 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.343777 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.359081 5032 scope.go:117] "RemoveContainer" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.397977 5032 scope.go:117] "RemoveContainer" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.404047 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": container with ID starting with ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1 not found: ID does not exist" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.404086 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1"} err="failed to get container status \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": rpc error: code = NotFound desc = could not find container \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": container with ID starting with ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.404111 5032 scope.go:117] "RemoveContainer" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.408981 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": container with ID starting with ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e not found: ID does not exist" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.409027 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e"} err="failed to get container status \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": rpc error: code = NotFound desc = could not find container \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": container with ID starting with ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.409053 5032 scope.go:117] "RemoveContainer" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.409392 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": container with ID starting with 832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df not found: ID does not exist" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.409414 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df"} err="failed to get container status \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": rpc error: code = NotFound desc = could not find container \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": container with ID starting with 832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.409427 5032 scope.go:117] "RemoveContainer" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.410936 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": container with ID starting with 162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3 not found: ID does not exist" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.410955 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3"} err="failed to get container status \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": rpc error: code = NotFound desc = could not find container \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": container with ID starting with 162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.410968 5032 scope.go:117] "RemoveContainer" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.415105 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1"} err="failed to get container status \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": rpc error: code = NotFound desc = could not find container \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": container with ID starting with ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.415169 5032 scope.go:117] "RemoveContainer" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.420910 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e"} err="failed to get container status \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": rpc error: code = NotFound desc = could not find container \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": container with ID starting with 
ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.420955 5032 scope.go:117] "RemoveContainer" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.425995 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df"} err="failed to get container status \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": rpc error: code = NotFound desc = could not find container \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": container with ID starting with 832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.426036 5032 scope.go:117] "RemoveContainer" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.426724 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.429013 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3"} err="failed to get container status \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": rpc error: code = NotFound desc = could not find container \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": container with ID starting with 162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.429047 5032 scope.go:117] "RemoveContainer" containerID="ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.431379 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.436028 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1"} err="failed to get container status \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": rpc error: code = NotFound desc = could not find container \"ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1\": container with ID starting with ce755ee8e200efb1db9766db2b82276bbe89ca8619568f2bc68fe93f0e061ad1 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.436062 5032 scope.go:117] "RemoveContainer" containerID="ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.439925 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440000 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440041 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440083 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440132 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rlq6\" (UniqueName: \"kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440182 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440233 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.440970 5032 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e"} err="failed to get container status \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": rpc error: code = NotFound desc = could not find container \"ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e\": container with ID starting with ef0a869428ed7e0f8cdb84f46b103e53a20ad350d2ac015e36f5cacd1ee8db5e not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.441011 5032 scope.go:117] "RemoveContainer" containerID="832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.444089 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df"} err="failed to get container status \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": rpc error: code = NotFound desc = could not find container \"832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df\": container with ID starting with 832a425b832e315656b0d38a9fdf715cd4446ab535f55d0a7ebb65f6365a89df not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.444112 5032 scope.go:117] "RemoveContainer" containerID="162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.456392 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3"} err="failed to get container status \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": rpc error: code = NotFound desc = could not find container \"162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3\": container with ID starting with 162e6dbdbc9ad1203823a0e05881237217bf0c0c178d93ad6395742985a6ecd3 not found: ID does not exist" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.488197 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcdacfde-30ea-4db9-af16-525aac6f2d5f" path="/var/lib/kubelet/pods/dcdacfde-30ea-4db9-af16-525aac6f2d5f/volumes" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.489007 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.489032 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.492599 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.497452 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.497653 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.497784 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-v4dkx" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.497929 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.543516 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.544874 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.544930 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.544999 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.545034 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.545067 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.545142 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rlq6\" (UniqueName: \"kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.547095 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " 
pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.547274 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.547464 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.547663 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.548223 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgp6h\" (UniqueName: \"kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.549928 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.549995 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.550062 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.550205 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.553351 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.553464 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.553518 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lkcp\" (UniqueName: \"kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.554936 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.555841 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.559066 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.563976 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.566403 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: E0122 16:24:40.568556 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config-data kube-api-access-8rlq6 scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="1256a93d-e63e-4e01-96bf-e6f5c78cd08d" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.569658 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.570319 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data\") pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.574213 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rlq6\" (UniqueName: \"kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6\") 
pod \"ceilometer-0\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " pod="openstack/ceilometer-0" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656762 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656818 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656837 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656904 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656928 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656945 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656973 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgp6h\" (UniqueName: \"kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.656988 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.657005 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc 
kubenswrapper[5032]: I0122 16:24:40.657068 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.657086 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lkcp\" (UniqueName: \"kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.658480 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.659244 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.659884 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.660030 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.660284 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.662674 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.663412 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.665829 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.666414 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.674968 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lkcp\" (UniqueName: \"kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp\") pod \"neutron-66dc46f64-9gscp\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.686265 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgp6h\" (UniqueName: \"kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h\") pod \"dnsmasq-dns-6b7b667979-t7vtj\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.778497 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:40 crc kubenswrapper[5032]: I0122 16:24:40.812136 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.236571 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6c7b5bf75b-t5dp6" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.242035 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.257510 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.259537 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:41 crc kubenswrapper[5032]: W0122 16:24:41.262516 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89dac56e_0d23_4571_8865_36a1bbfa9731.slice/crio-a5094480169d78b46b6ee46b8a61c6761e346fdb975ef8c9394edf6e3f6e6189 WatchSource:0}: Error finding container a5094480169d78b46b6ee46b8a61c6761e346fdb975ef8c9394edf6e3f6e6189: Status 404 returned error can't find the container with id a5094480169d78b46b6ee46b8a61c6761e346fdb975ef8c9394edf6e3f6e6189 Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.309410 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.320144 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375271 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375391 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375433 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375456 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375555 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375594 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rlq6\" (UniqueName: \"kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.375621 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data\") pod \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\" (UID: \"1256a93d-e63e-4e01-96bf-e6f5c78cd08d\") " Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.377893 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.378407 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.380104 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.384504 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6" (OuterVolumeSpecName: "kube-api-access-8rlq6") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "kube-api-access-8rlq6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.384703 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts" (OuterVolumeSpecName: "scripts") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.386298 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data" (OuterVolumeSpecName: "config-data") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.388882 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.390992 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1256a93d-e63e-4e01-96bf-e6f5c78cd08d" (UID: "1256a93d-e63e-4e01-96bf-e6f5c78cd08d"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:41 crc kubenswrapper[5032]: W0122 16:24:41.453168 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff349c82_6063_48d6_9d1f_d5075e2163b5.slice/crio-aa08fd3e88822cf303c48572dd23545d389e889bb2de8caedf4cb3a36178b654 WatchSource:0}: Error finding container aa08fd3e88822cf303c48572dd23545d389e889bb2de8caedf4cb3a36178b654: Status 404 returned error can't find the container with id aa08fd3e88822cf303c48572dd23545d389e889bb2de8caedf4cb3a36178b654 Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.455243 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480154 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480525 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rlq6\" (UniqueName: \"kubernetes.io/projected/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-kube-api-access-8rlq6\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480542 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480553 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480565 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:41 crc kubenswrapper[5032]: I0122 16:24:41.480577 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1256a93d-e63e-4e01-96bf-e6f5c78cd08d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.256093 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerStarted","Data":"ebf3523e15ede3ae09eec3521bbaf0ef23ae461e03d369a1954e332476314201"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.256374 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerStarted","Data":"b01d939fa86219beeb099fc1a3edd0a27d8c44f7ca0eda96a010c4caa7b6ed24"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.256388 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerStarted","Data":"aa08fd3e88822cf303c48572dd23545d389e889bb2de8caedf4cb3a36178b654"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.257324 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.259213 5032 generic.go:334] "Generic (PLEG): 
container finished" podID="175d212f-92e2-468a-ac51-5c821d160842" containerID="993447f33f973d6e3fee0c7551a69d446c00f519c48b60bfa88f5947350f5fef" exitCode=0 Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.259258 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-jb65g" event={"ID":"175d212f-92e2-468a-ac51-5c821d160842","Type":"ContainerDied","Data":"993447f33f973d6e3fee0c7551a69d446c00f519c48b60bfa88f5947350f5fef"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.262136 5032 generic.go:334] "Generic (PLEG): container finished" podID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerID="f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c" exitCode=0 Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.262344 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon-log" containerID="cri-o://1f79f0f2126156b0ebaf59b79d17e3c6eec85d2cf121f7c2ba7096f5f7293ffe" gracePeriod=30 Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.263393 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" event={"ID":"89dac56e-0d23-4571-8865-36a1bbfa9731","Type":"ContainerDied","Data":"f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.263425 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" event={"ID":"89dac56e-0d23-4571-8865-36a1bbfa9731","Type":"ContainerStarted","Data":"a5094480169d78b46b6ee46b8a61c6761e346fdb975ef8c9394edf6e3f6e6189"} Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.263457 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.264339 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" containerID="cri-o://c19e7c4f249df844077ca639ddf4702d340f082798870e59ea1c0a5891e3d050" gracePeriod=30 Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.305372 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-66dc46f64-9gscp" podStartSLOduration=2.3053514059999998 podStartE2EDuration="2.305351406s" podCreationTimestamp="2026-01-22 16:24:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:42.280422774 +0000 UTC m=+1186.092591805" watchObservedRunningTime="2026-01-22 16:24:42.305351406 +0000 UTC m=+1186.117520427" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.438548 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.454066 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.466473 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1256a93d-e63e-4e01-96bf-e6f5c78cd08d" path="/var/lib/kubelet/pods/1256a93d-e63e-4e01-96bf-e6f5c78cd08d/volumes" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.468413 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.470361 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.473711 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.473911 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.475377 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598253 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598603 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598705 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrj84\" (UniqueName: \"kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598890 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598936 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.598981 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.599023 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.685616 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c9dff9df7-cgg9f"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.687362 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.689638 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.689894 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700233 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700342 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrj84\" (UniqueName: \"kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700392 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700407 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700425 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700449 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.700786 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.701007 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " 
pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.709969 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.710277 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9dff9df7-cgg9f"] Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.715621 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.716490 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.717050 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.795765 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrj84\" (UniqueName: \"kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84\") pod \"ceilometer-0\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " pod="openstack/ceilometer-0" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811445 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-internal-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811491 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-ovndb-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811528 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811554 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-httpd-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 
16:24:42.811611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-combined-ca-bundle\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-public-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.811693 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln2xl\" (UniqueName: \"kubernetes.io/projected/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-kube-api-access-ln2xl\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912716 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-combined-ca-bundle\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912787 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-public-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912821 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln2xl\" (UniqueName: \"kubernetes.io/projected/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-kube-api-access-ln2xl\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912854 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-internal-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912897 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-ovndb-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912928 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.912952 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-httpd-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.916552 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-ovndb-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.920339 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-httpd-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.921357 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-public-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.925657 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-combined-ca-bundle\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.933497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-internal-tls-certs\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.935626 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-config\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:42 crc kubenswrapper[5032]: I0122 16:24:42.938669 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln2xl\" (UniqueName: \"kubernetes.io/projected/31f3bc59-5ab8-4277-a1cf-d496da47a1a1-kube-api-access-ln2xl\") pod \"neutron-5c9dff9df7-cgg9f\" (UID: \"31f3bc59-5ab8-4277-a1cf-d496da47a1a1\") " pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.092675 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.166371 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.298992 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" event={"ID":"89dac56e-0d23-4571-8865-36a1bbfa9731","Type":"ContainerStarted","Data":"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960"} Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.299329 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.573560 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" podStartSLOduration=3.573537836 podStartE2EDuration="3.573537836s" podCreationTimestamp="2026-01-22 16:24:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:43.322193078 +0000 UTC m=+1187.134362129" watchObservedRunningTime="2026-01-22 16:24:43.573537836 +0000 UTC m=+1187.385706867" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.581459 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.775961 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-jb65g" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.830700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jz2k5\" (UniqueName: \"kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5\") pod \"175d212f-92e2-468a-ac51-5c821d160842\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.830796 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle\") pod \"175d212f-92e2-468a-ac51-5c821d160842\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.830833 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data\") pod \"175d212f-92e2-468a-ac51-5c821d160842\" (UID: \"175d212f-92e2-468a-ac51-5c821d160842\") " Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.839545 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "175d212f-92e2-468a-ac51-5c821d160842" (UID: "175d212f-92e2-468a-ac51-5c821d160842"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.851422 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5" (OuterVolumeSpecName: "kube-api-access-jz2k5") pod "175d212f-92e2-468a-ac51-5c821d160842" (UID: "175d212f-92e2-468a-ac51-5c821d160842"). InnerVolumeSpecName "kube-api-access-jz2k5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.858030 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "175d212f-92e2-468a-ac51-5c821d160842" (UID: "175d212f-92e2-468a-ac51-5c821d160842"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.866959 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9dff9df7-cgg9f"] Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.933580 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.933616 5032 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175d212f-92e2-468a-ac51-5c821d160842-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:43 crc kubenswrapper[5032]: I0122 16:24:43.933626 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jz2k5\" (UniqueName: \"kubernetes.io/projected/175d212f-92e2-468a-ac51-5c821d160842-kube-api-access-jz2k5\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.307458 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9dff9df7-cgg9f" event={"ID":"31f3bc59-5ab8-4277-a1cf-d496da47a1a1","Type":"ContainerStarted","Data":"be6866220ac8ff6fc0a5e15723f96bc7aa453b3996c0fc84cababc43db3b06fb"} Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.307816 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9dff9df7-cgg9f" event={"ID":"31f3bc59-5ab8-4277-a1cf-d496da47a1a1","Type":"ContainerStarted","Data":"02e26c526961cd950b76ae8d9f42c58a371559f7c13793d96f6333356d8182c5"} Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.312608 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-jb65g" event={"ID":"175d212f-92e2-468a-ac51-5c821d160842","Type":"ContainerDied","Data":"fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26"} Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.312636 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe0bbcbea215ffe01e20b9c7a3f5524474d58c364a98256a754ec2c7961c7e26" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.312686 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-jb65g" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.324630 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerStarted","Data":"68b662703be8dfb70cab672ebeec28f57020bfb6be8969be62c90c9c0dbd6028"} Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.599474 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6b55c79c5-6tv8n"] Jan 22 16:24:44 crc kubenswrapper[5032]: E0122 16:24:44.600152 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175d212f-92e2-468a-ac51-5c821d160842" containerName="barbican-db-sync" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.600242 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="175d212f-92e2-468a-ac51-5c821d160842" containerName="barbican-db-sync" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.600606 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="175d212f-92e2-468a-ac51-5c821d160842" containerName="barbican-db-sync" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.601786 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.614731 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9fxfm" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.614920 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.615140 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.616148 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6b55c79c5-6tv8n"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.650969 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75sgj\" (UniqueName: \"kubernetes.io/projected/c4d1b24b-8e40-4f2c-9948-55faab954707-kube-api-access-75sgj\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.651013 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data-custom\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.651055 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-combined-ca-bundle\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.651071 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4d1b24b-8e40-4f2c-9948-55faab954707-logs\") pod 
\"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.651150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.658906 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-bdb85dc66-qzsm2"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.660335 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.673948 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.702932 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-bdb85dc66-qzsm2"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.731097 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.741464 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.743025 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.752626 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-logs\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.752899 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data-custom\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753000 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753098 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753247 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-75sgj\" (UniqueName: \"kubernetes.io/projected/c4d1b24b-8e40-4f2c-9948-55faab954707-kube-api-access-75sgj\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753353 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data-custom\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753438 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-combined-ca-bundle\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753501 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4d1b24b-8e40-4f2c-9948-55faab954707-logs\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753567 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-combined-ca-bundle\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.753692 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt95g\" (UniqueName: \"kubernetes.io/projected/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-kube-api-access-xt95g\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.755575 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4d1b24b-8e40-4f2c-9948-55faab954707-logs\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.760385 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.761525 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.761715 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-combined-ca-bundle\") pod 
\"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.774960 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4d1b24b-8e40-4f2c-9948-55faab954707-config-data-custom\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.807710 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75sgj\" (UniqueName: \"kubernetes.io/projected/c4d1b24b-8e40-4f2c-9948-55faab954707-kube-api-access-75sgj\") pod \"barbican-worker-6b55c79c5-6tv8n\" (UID: \"c4d1b24b-8e40-4f2c-9948-55faab954707\") " pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854512 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854569 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-combined-ca-bundle\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854595 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rprt\" (UniqueName: \"kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854657 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854682 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt95g\" (UniqueName: \"kubernetes.io/projected/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-kube-api-access-xt95g\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854701 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-logs\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854716 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data-custom\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854735 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854761 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.854777 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.858348 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-logs\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.864459 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-combined-ca-bundle\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.882730 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data-custom\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.889297 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-config-data\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc 
kubenswrapper[5032]: I0122 16:24:44.906679 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.909619 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.911581 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.918853 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt95g\" (UniqueName: \"kubernetes.io/projected/595b4bfb-0e2f-41d7-88b1-8aabdb432b2e-kube-api-access-xt95g\") pod \"barbican-keystone-listener-bdb85dc66-qzsm2\" (UID: \"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e\") " pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.920078 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.943584 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b55c79c5-6tv8n" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956107 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956147 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956179 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956203 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956232 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956253 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs\") pod 
\"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956282 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szj4r\" (UniqueName: \"kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956326 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956357 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rprt\" (UniqueName: \"kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956381 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.956425 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.957425 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.957881 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.958036 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.958441 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: 
\"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.958501 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:44 crc kubenswrapper[5032]: I0122 16:24:44.994792 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rprt\" (UniqueName: \"kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt\") pod \"dnsmasq-dns-848cf88cfc-jrlfk\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.029009 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.057154 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.057194 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.057219 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.057239 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.057267 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szj4r\" (UniqueName: \"kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.059981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.068817 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.069545 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.069783 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.077793 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szj4r\" (UniqueName: \"kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r\") pod \"barbican-api-86dffc6c94-q7pl8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.151046 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.329425 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.333382 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9dff9df7-cgg9f" event={"ID":"31f3bc59-5ab8-4277-a1cf-d496da47a1a1","Type":"ContainerStarted","Data":"e80944009abf512616f710e5cbe207955167e57923b9a30e0aba8176037d176d"} Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.333772 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.335597 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerStarted","Data":"51350ee0ec801a4d0f008e7d8bdf969629526afe6232f2183db7e6682239384f"} Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.335712 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="dnsmasq-dns" containerID="cri-o://625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960" gracePeriod=10 Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.357617 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c9dff9df7-cgg9f" podStartSLOduration=3.357564556 podStartE2EDuration="3.357564556s" podCreationTimestamp="2026-01-22 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:45.353956719 +0000 UTC m=+1189.166125740" watchObservedRunningTime="2026-01-22 16:24:45.357564556 +0000 UTC m=+1189.169733587" Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.483926 5032 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/barbican-worker-6b55c79c5-6tv8n"] Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.569540 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-bdb85dc66-qzsm2"] Jan 22 16:24:45 crc kubenswrapper[5032]: W0122 16:24:45.570293 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod595b4bfb_0e2f_41d7_88b1_8aabdb432b2e.slice/crio-5ad02b9d1dba2f55e95127be8580030250c768c8bf500edc0cac06b597488437 WatchSource:0}: Error finding container 5ad02b9d1dba2f55e95127be8580030250c768c8bf500edc0cac06b597488437: Status 404 returned error can't find the container with id 5ad02b9d1dba2f55e95127be8580030250c768c8bf500edc0cac06b597488437 Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.670942 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:45 crc kubenswrapper[5032]: I0122 16:24:45.816274 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.347334 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" event={"ID":"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e","Type":"ContainerStarted","Data":"5ad02b9d1dba2f55e95127be8580030250c768c8bf500edc0cac06b597488437"} Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.348825 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" event={"ID":"cb02c144-1900-4e7a-9bd3-7639134ee60a","Type":"ContainerStarted","Data":"37a680ac688f43b976229e2c1cdb2c30262b348fce2d49f4176651da2f34341c"} Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.350173 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerStarted","Data":"4bcb3da0b4aba06b6fcd489b4fd36a2bbece4c0d43fefec38d15863cb54de2cc"} Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.352097 5032 generic.go:334] "Generic (PLEG): container finished" podID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerID="c19e7c4f249df844077ca639ddf4702d340f082798870e59ea1c0a5891e3d050" exitCode=0 Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.352185 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerDied","Data":"c19e7c4f249df844077ca639ddf4702d340f082798870e59ea1c0a5891e3d050"} Jan 22 16:24:46 crc kubenswrapper[5032]: I0122 16:24:46.353277 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b55c79c5-6tv8n" event={"ID":"c4d1b24b-8e40-4f2c-9948-55faab954707","Type":"ContainerStarted","Data":"4dcab14e2335fafec78e450d138cfdda737b9c93d97ef4252dc5f4b502143e83"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.190239 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.221579 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.303260 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.304801 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.304843 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.304907 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgp6h\" (UniqueName: \"kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.304929 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.304951 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0\") pod \"89dac56e-0d23-4571-8865-36a1bbfa9731\" (UID: \"89dac56e-0d23-4571-8865-36a1bbfa9731\") " Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.322833 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h" (OuterVolumeSpecName: "kube-api-access-mgp6h") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "kube-api-access-mgp6h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.390044 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerStarted","Data":"10cb1dd6a0c799da430b5ffa57aa888a5c6df142f4409db35c8742c4b0a1c358"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.398896 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.403708 5032 generic.go:334] "Generic (PLEG): container finished" podID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerID="625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960" exitCode=0 Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.403785 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" event={"ID":"89dac56e-0d23-4571-8865-36a1bbfa9731","Type":"ContainerDied","Data":"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.403819 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" event={"ID":"89dac56e-0d23-4571-8865-36a1bbfa9731","Type":"ContainerDied","Data":"a5094480169d78b46b6ee46b8a61c6761e346fdb975ef8c9394edf6e3f6e6189"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.403840 5032 scope.go:117] "RemoveContainer" containerID="625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.404024 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-t7vtj" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.411128 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgp6h\" (UniqueName: \"kubernetes.io/projected/89dac56e-0d23-4571-8865-36a1bbfa9731-kube-api-access-mgp6h\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.411157 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.421716 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.426507 5032 generic.go:334] "Generic (PLEG): container finished" podID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" containerID="27e06268d6656feccfcd5f2b92f2c1d29cc816b29fe34dce87415dfe46f214d7" exitCode=0 Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.426585 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dxmk2" event={"ID":"857e311d-5f3f-4c81-9f86-6d0a3c9e0307","Type":"ContainerDied","Data":"27e06268d6656feccfcd5f2b92f2c1d29cc816b29fe34dce87415dfe46f214d7"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.430236 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.430532 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.440543 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config" (OuterVolumeSpecName: "config") pod "89dac56e-0d23-4571-8865-36a1bbfa9731" (UID: "89dac56e-0d23-4571-8865-36a1bbfa9731"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.443017 5032 generic.go:334] "Generic (PLEG): container finished" podID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerID="e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03" exitCode=0 Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.443131 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" event={"ID":"cb02c144-1900-4e7a-9bd3-7639134ee60a","Type":"ContainerDied","Data":"e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.457960 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerStarted","Data":"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.458473 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerStarted","Data":"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425"} Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.459262 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.459297 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.505901 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-86dffc6c94-q7pl8" podStartSLOduration=3.505852821 podStartE2EDuration="3.505852821s" podCreationTimestamp="2026-01-22 16:24:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:47.495408229 +0000 UTC m=+1191.307577250" watchObservedRunningTime="2026-01-22 16:24:47.505852821 +0000 UTC m=+1191.318021852" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.515306 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.515372 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.515385 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.515402 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89dac56e-0d23-4571-8865-36a1bbfa9731-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.742300 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.751403 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-t7vtj"] 
Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.804762 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5fb7f99d8d-bm84s"] Jan 22 16:24:47 crc kubenswrapper[5032]: E0122 16:24:47.805157 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="dnsmasq-dns" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.805174 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="dnsmasq-dns" Jan 22 16:24:47 crc kubenswrapper[5032]: E0122 16:24:47.805187 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="init" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.805193 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="init" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.805372 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" containerName="dnsmasq-dns" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.806218 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.809679 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.817121 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5fb7f99d8d-bm84s"] Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.878046 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978148 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-logs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978196 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-combined-ca-bundle\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978220 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9d7mq\" (UniqueName: \"kubernetes.io/projected/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-kube-api-access-9d7mq\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978645 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-internal-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978696 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-public-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978755 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data-custom\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:47 crc kubenswrapper[5032]: I0122 16:24:47.978790 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080489 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-internal-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080555 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-public-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080658 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data-custom\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080703 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080791 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-logs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080822 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-combined-ca-bundle\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.080851 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9d7mq\" (UniqueName: \"kubernetes.io/projected/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-kube-api-access-9d7mq\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.082467 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-logs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.085302 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-internal-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.088548 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-combined-ca-bundle\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.089109 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.090467 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-public-tls-certs\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.095720 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-config-data-custom\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.100409 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9d7mq\" (UniqueName: \"kubernetes.io/projected/48d84a1b-977f-4127-b0ed-cdf2a93ad6e4-kube-api-access-9d7mq\") pod \"barbican-api-5fb7f99d8d-bm84s\" (UID: \"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4\") " pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.191118 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:48 crc kubenswrapper[5032]: I0122 16:24:48.471803 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89dac56e-0d23-4571-8865-36a1bbfa9731" path="/var/lib/kubelet/pods/89dac56e-0d23-4571-8865-36a1bbfa9731/volumes" Jan 22 16:24:49 crc kubenswrapper[5032]: I0122 16:24:49.956723 5032 scope.go:117] "RemoveContainer" containerID="f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.072661 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124410 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124509 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124623 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124662 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124735 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrjv4\" (UniqueName: \"kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124775 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts\") pod \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\" (UID: \"857e311d-5f3f-4c81-9f86-6d0a3c9e0307\") " Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.124736 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.125252 5032 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.128452 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4" (OuterVolumeSpecName: "kube-api-access-mrjv4") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "kube-api-access-mrjv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.129501 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.129581 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts" (OuterVolumeSpecName: "scripts") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.148045 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.188655 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data" (OuterVolumeSpecName: "config-data") pod "857e311d-5f3f-4c81-9f86-6d0a3c9e0307" (UID: "857e311d-5f3f-4c81-9f86-6d0a3c9e0307"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.227636 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.227692 5032 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.227711 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.227728 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrjv4\" (UniqueName: \"kubernetes.io/projected/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-kube-api-access-mrjv4\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.227749 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857e311d-5f3f-4c81-9f86-6d0a3c9e0307-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.507951 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dxmk2" Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.507957 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dxmk2" event={"ID":"857e311d-5f3f-4c81-9f86-6d0a3c9e0307","Type":"ContainerDied","Data":"4a893f5990a0b852305bf95912792f30a34a167492af638f1311fc8ea5ca6578"} Jan 22 16:24:50 crc kubenswrapper[5032]: I0122 16:24:50.508097 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a893f5990a0b852305bf95912792f30a34a167492af638f1311fc8ea5ca6578" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.433481 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:24:51 crc kubenswrapper[5032]: E0122 16:24:51.436984 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" containerName="cinder-db-sync" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.437099 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" containerName="cinder-db-sync" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.437417 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" containerName="cinder-db-sync" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.438613 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.446306 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-tgncl" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.446455 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.446763 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.447314 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.504588 5032 scope.go:117] "RemoveContainer" containerID="625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960" Jan 22 16:24:51 crc kubenswrapper[5032]: E0122 16:24:51.509947 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960\": container with ID starting with 625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960 not found: ID does not exist" containerID="625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.509988 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960"} err="failed to get container status \"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960\": rpc error: code = NotFound desc = could not find container \"625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960\": container with ID starting with 625297365f8e6811874c512df433bca933bdf5e2f94e560ca31ac9a515f7b960 not found: ID does not exist" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.510013 5032 scope.go:117] "RemoveContainer" containerID="f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c" Jan 22 16:24:51 crc kubenswrapper[5032]: E0122 16:24:51.519977 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c\": container with ID starting with f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c not found: ID does not exist" containerID="f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.520010 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c"} err="failed to get container status \"f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c\": rpc error: code = NotFound desc = could not find container \"f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c\": container with ID starting with f1c0cf7924310e0b59505058388a3fde688e49f82fdab1af0740595ab6c3847c not found: ID does not exist" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.541632 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.554454 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.554681 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.554843 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt4m5\" (UniqueName: \"kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.554972 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.555109 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.555221 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.642378 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.656831 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.656925 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.656979 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt4m5\" (UniqueName: \"kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.657005 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.657059 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.657095 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.658983 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.670670 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.677981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.683668 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.702682 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.729334 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.730824 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.732652 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt4m5\" (UniqueName: \"kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5\") pod \"cinder-scheduler-0\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.779160 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.780671 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.783995 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.812936 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.829705 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.855340 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862600 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862667 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld7cf\" (UniqueName: \"kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862696 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862717 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862796 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.862814 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.963953 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964024 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964050 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lppnb\" (UniqueName: \"kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964070 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964111 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964196 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld7cf\" (UniqueName: \"kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964220 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964270 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964296 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964332 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964348 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964365 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.964752 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.965632 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.965660 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.966249 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.966275 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " 
pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:51 crc kubenswrapper[5032]: I0122 16:24:51.997718 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld7cf\" (UniqueName: \"kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf\") pod \"dnsmasq-dns-6578955fd5-84wwk\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.067875 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lppnb\" (UniqueName: \"kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.067943 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.067991 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.068057 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.068094 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.068116 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.068135 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.068371 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.070037 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs\") pod \"cinder-api-0\" (UID: 
\"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.074581 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.077656 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.083292 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.084650 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.087303 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.096329 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lppnb\" (UniqueName: \"kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb\") pod \"cinder-api-0\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.152913 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.166384 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5fb7f99d8d-bm84s"] Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.418405 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:24:52 crc kubenswrapper[5032]: W0122 16:24:52.427293 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d674820_800a_4969_ad68_93a53f9d3ad5.slice/crio-ff15fc56f7b9ab77ba14a68c7dc1eeb90addf650705d0d0f946b0c58c54f7e4d WatchSource:0}: Error finding container ff15fc56f7b9ab77ba14a68c7dc1eeb90addf650705d0d0f946b0c58c54f7e4d: Status 404 returned error can't find the container with id ff15fc56f7b9ab77ba14a68c7dc1eeb90addf650705d0d0f946b0c58c54f7e4d Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.551962 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" event={"ID":"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e","Type":"ContainerStarted","Data":"b0e189065ef440f91be2edfd60bcd42eed59ec05db804f09efa8f20cbaa3c6e3"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.552076 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" event={"ID":"595b4bfb-0e2f-41d7-88b1-8aabdb432b2e","Type":"ContainerStarted","Data":"11f13b64113df854c56f83800308f747da47454bca55c021577ef5bcbe86c1a7"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.555556 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" event={"ID":"cb02c144-1900-4e7a-9bd3-7639134ee60a","Type":"ContainerStarted","Data":"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.555824 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="dnsmasq-dns" containerID="cri-o://d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1" gracePeriod=10 Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.556036 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.565471 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b55c79c5-6tv8n" event={"ID":"c4d1b24b-8e40-4f2c-9948-55faab954707","Type":"ContainerStarted","Data":"f317088298c60766a2302a1608f4c6e30992f93a568ee0c061f947029af46f83"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.585157 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5fb7f99d8d-bm84s" event={"ID":"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4","Type":"ContainerStarted","Data":"df6846d492c2e2a314ef9791fd8b1c41b66a4050669483b91e4d9cb0da6285ad"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.599794 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerStarted","Data":"d5847f0b2261e34359b1d0180291fa0609016e101e77c39044ab690dd2faba90"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.607350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerStarted","Data":"ff15fc56f7b9ab77ba14a68c7dc1eeb90addf650705d0d0f946b0c58c54f7e4d"} Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.608735 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" podStartSLOduration=8.608719701 podStartE2EDuration="8.608719701s" podCreationTimestamp="2026-01-22 16:24:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:52.607260082 +0000 UTC m=+1196.419429113" watchObservedRunningTime="2026-01-22 16:24:52.608719701 +0000 UTC m=+1196.420888732" Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.613752 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-bdb85dc66-qzsm2" podStartSLOduration=2.508876895 podStartE2EDuration="8.613737577s" podCreationTimestamp="2026-01-22 16:24:44 +0000 UTC" firstStartedPulling="2026-01-22 16:24:45.573178451 +0000 UTC m=+1189.385347472" lastFinishedPulling="2026-01-22 16:24:51.678039123 +0000 UTC m=+1195.490208154" observedRunningTime="2026-01-22 16:24:52.579032881 +0000 UTC m=+1196.391201922" watchObservedRunningTime="2026-01-22 16:24:52.613737577 +0000 UTC m=+1196.425906608" Jan 22 16:24:52 crc kubenswrapper[5032]: W0122 16:24:52.752754 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f47d550_9337_412b_be06_1034c44509c9.slice/crio-bcc0feac34974df6bfcfd803062b03af93d586b6bed124963421fef53af00427 WatchSource:0}: Error finding container bcc0feac34974df6bfcfd803062b03af93d586b6bed124963421fef53af00427: Status 404 returned error can't find the container with id bcc0feac34974df6bfcfd803062b03af93d586b6bed124963421fef53af00427 Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.754599 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:24:52 crc kubenswrapper[5032]: I0122 16:24:52.918825 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:52 crc kubenswrapper[5032]: W0122 16:24:52.933971 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19c81dd6_8a6b_4029_9b0a_1b9c550ab29e.slice/crio-7dd776e331a09348dffcf4dcfea7bc8248066394d461bb7a14326c8356d57913 WatchSource:0}: Error finding container 7dd776e331a09348dffcf4dcfea7bc8248066394d461bb7a14326c8356d57913: Status 404 returned error can't find the container with id 7dd776e331a09348dffcf4dcfea7bc8248066394d461bb7a14326c8356d57913 Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.236303 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.351673 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.351778 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.351876 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.351948 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rprt\" (UniqueName: \"kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.351994 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.352054 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config\") pod \"cb02c144-1900-4e7a-9bd3-7639134ee60a\" (UID: \"cb02c144-1900-4e7a-9bd3-7639134ee60a\") " Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.372047 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt" (OuterVolumeSpecName: "kube-api-access-9rprt") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "kube-api-access-9rprt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.443475 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.461430 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.476136 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.476951 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rprt\" (UniqueName: \"kubernetes.io/projected/cb02c144-1900-4e7a-9bd3-7639134ee60a-kube-api-access-9rprt\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.477069 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.479554 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config" (OuterVolumeSpecName: "config") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.483325 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.489627 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cb02c144-1900-4e7a-9bd3-7639134ee60a" (UID: "cb02c144-1900-4e7a-9bd3-7639134ee60a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.582063 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.582093 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.582102 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb02c144-1900-4e7a-9bd3-7639134ee60a-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.587559 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.624197 5032 generic.go:334] "Generic (PLEG): container finished" podID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerID="d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1" exitCode=0 Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.624285 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" event={"ID":"cb02c144-1900-4e7a-9bd3-7639134ee60a","Type":"ContainerDied","Data":"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.624312 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" event={"ID":"cb02c144-1900-4e7a-9bd3-7639134ee60a","Type":"ContainerDied","Data":"37a680ac688f43b976229e2c1cdb2c30262b348fce2d49f4176651da2f34341c"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.624329 5032 scope.go:117] "RemoveContainer" containerID="d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.624428 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jrlfk" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.663877 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b55c79c5-6tv8n" event={"ID":"c4d1b24b-8e40-4f2c-9948-55faab954707","Type":"ContainerStarted","Data":"820e0582c519b517574dcbf1de54d6c3874acc3aea70e2e8ae884d1677202c16"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.669667 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.675947 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5fb7f99d8d-bm84s" event={"ID":"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4","Type":"ContainerStarted","Data":"350080382fe536b820073296fb978937ba227151f39dee59f764110ab5dae8ef"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.675994 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5fb7f99d8d-bm84s" event={"ID":"48d84a1b-977f-4127-b0ed-cdf2a93ad6e4","Type":"ContainerStarted","Data":"88807870d7ce4135ee210288a3cfd7f224ffce70ed691693e9720d78c5bd0eb1"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.676040 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.676058 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.678271 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jrlfk"] Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.686239 5032 generic.go:334] "Generic (PLEG): container finished" podID="3f47d550-9337-412b-be06-1034c44509c9" containerID="be08ce8794a867ea080097dc3b1577e11decd52009750084d34cbe6207491f79" exitCode=0 Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.686303 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" event={"ID":"3f47d550-9337-412b-be06-1034c44509c9","Type":"ContainerDied","Data":"be08ce8794a867ea080097dc3b1577e11decd52009750084d34cbe6207491f79"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.686331 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" event={"ID":"3f47d550-9337-412b-be06-1034c44509c9","Type":"ContainerStarted","Data":"bcc0feac34974df6bfcfd803062b03af93d586b6bed124963421fef53af00427"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.692383 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6b55c79c5-6tv8n" podStartSLOduration=3.691692291 podStartE2EDuration="9.692365365s" podCreationTimestamp="2026-01-22 16:24:44 +0000 UTC" firstStartedPulling="2026-01-22 16:24:45.503849111 +0000 UTC m=+1189.316018142" lastFinishedPulling="2026-01-22 16:24:51.504522185 +0000 UTC m=+1195.316691216" observedRunningTime="2026-01-22 16:24:53.685425718 +0000 UTC m=+1197.497594749" watchObservedRunningTime="2026-01-22 16:24:53.692365365 +0000 UTC m=+1197.504534386" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.707031 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5fb7f99d8d-bm84s" podStartSLOduration=6.70701544 podStartE2EDuration="6.70701544s" podCreationTimestamp="2026-01-22 16:24:47 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:53.7047902 +0000 UTC m=+1197.516959231" watchObservedRunningTime="2026-01-22 16:24:53.70701544 +0000 UTC m=+1197.519184471" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.738819 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerStarted","Data":"7dd776e331a09348dffcf4dcfea7bc8248066394d461bb7a14326c8356d57913"} Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.740457 5032 scope.go:117] "RemoveContainer" containerID="e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.777880 5032 scope.go:117] "RemoveContainer" containerID="d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1" Jan 22 16:24:53 crc kubenswrapper[5032]: E0122 16:24:53.782155 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1\": container with ID starting with d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1 not found: ID does not exist" containerID="d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.782201 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1"} err="failed to get container status \"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1\": rpc error: code = NotFound desc = could not find container \"d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1\": container with ID starting with d2a9906560f6762ff64452c80b4451e6d7af368c0f1c9f3303800047a7d2b4d1 not found: ID does not exist" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.782228 5032 scope.go:117] "RemoveContainer" containerID="e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03" Jan 22 16:24:53 crc kubenswrapper[5032]: E0122 16:24:53.787962 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03\": container with ID starting with e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03 not found: ID does not exist" containerID="e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03" Jan 22 16:24:53 crc kubenswrapper[5032]: I0122 16:24:53.787997 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03"} err="failed to get container status \"e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03\": rpc error: code = NotFound desc = could not find container \"e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03\": container with ID starting with e747ed9d3095b69ed06ed5e8e4468a4d95e74f9c35d0d3da8b8332e8d3689c03 not found: ID does not exist" Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.470634 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" path="/var/lib/kubelet/pods/cb02c144-1900-4e7a-9bd3-7639134ee60a/volumes" Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.752088 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerStarted","Data":"1be1fd44808132a7c830f0d2e5e400347571c06168780584b7a84a16f9b1aade"} Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.752173 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.762624 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" event={"ID":"3f47d550-9337-412b-be06-1034c44509c9","Type":"ContainerStarted","Data":"ee7960c8547486c961e372b3a389acdc7d5ba27372e55f58797f611a451c97c5"} Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.763060 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.765963 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerStarted","Data":"b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4"} Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.773470 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerStarted","Data":"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361"} Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.794904 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.897512171 podStartE2EDuration="12.794871397s" podCreationTimestamp="2026-01-22 16:24:42 +0000 UTC" firstStartedPulling="2026-01-22 16:24:43.644978303 +0000 UTC m=+1187.457147334" lastFinishedPulling="2026-01-22 16:24:53.542337529 +0000 UTC m=+1197.354506560" observedRunningTime="2026-01-22 16:24:54.780347875 +0000 UTC m=+1198.592516926" watchObservedRunningTime="2026-01-22 16:24:54.794871397 +0000 UTC m=+1198.607040428" Jan 22 16:24:54 crc kubenswrapper[5032]: I0122 16:24:54.808941 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" podStartSLOduration=3.8088266429999997 podStartE2EDuration="3.808826643s" podCreationTimestamp="2026-01-22 16:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:54.798535985 +0000 UTC m=+1198.610705026" watchObservedRunningTime="2026-01-22 16:24:54.808826643 +0000 UTC m=+1198.620995674" Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.803442 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerStarted","Data":"8b3b552af45a101bd0309708d49b12d44ecb4b32a588fec73666ef5b9e07c86e"} Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.807583 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api-log" containerID="cri-o://a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" gracePeriod=30 Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.807797 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerStarted","Data":"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb"} Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.808224 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.808275 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api" containerID="cri-o://1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" gracePeriod=30 Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.839205 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.727478918 podStartE2EDuration="4.838019077s" podCreationTimestamp="2026-01-22 16:24:51 +0000 UTC" firstStartedPulling="2026-01-22 16:24:52.429734114 +0000 UTC m=+1196.241903145" lastFinishedPulling="2026-01-22 16:24:53.540274273 +0000 UTC m=+1197.352443304" observedRunningTime="2026-01-22 16:24:55.83441612 +0000 UTC m=+1199.646585141" watchObservedRunningTime="2026-01-22 16:24:55.838019077 +0000 UTC m=+1199.650188108" Jan 22 16:24:55 crc kubenswrapper[5032]: I0122 16:24:55.862948 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.862928629 podStartE2EDuration="4.862928629s" podCreationTimestamp="2026-01-22 16:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:55.85702879 +0000 UTC m=+1199.669197821" watchObservedRunningTime="2026-01-22 16:24:55.862928629 +0000 UTC m=+1199.675097660" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.435564 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549445 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549547 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549626 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549695 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549715 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549736 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lppnb\" (UniqueName: \"kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549788 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs\") pod \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\" (UID: \"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e\") " Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.549950 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.550388 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs" (OuterVolumeSpecName: "logs") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.550797 5032 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.550813 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.567750 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts" (OuterVolumeSpecName: "scripts") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.570055 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.570503 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb" (OuterVolumeSpecName: "kube-api-access-lppnb") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "kube-api-access-lppnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.579404 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.611713 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data" (OuterVolumeSpecName: "config-data") pod "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" (UID: "19c81dd6-8a6b-4029-9b0a-1b9c550ab29e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.652516 5032 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.652895 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.652909 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lppnb\" (UniqueName: \"kubernetes.io/projected/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-kube-api-access-lppnb\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.652924 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.652937 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.818097 5032 generic.go:334] "Generic (PLEG): container finished" podID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerID="1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" exitCode=0 Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.818318 5032 generic.go:334] "Generic (PLEG): container finished" podID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerID="a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" exitCode=143 Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.819426 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.819933 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerDied","Data":"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb"} Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.819970 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerDied","Data":"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361"} Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.820030 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"19c81dd6-8a6b-4029-9b0a-1b9c550ab29e","Type":"ContainerDied","Data":"7dd776e331a09348dffcf4dcfea7bc8248066394d461bb7a14326c8356d57913"} Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.820052 5032 scope.go:117] "RemoveContainer" containerID="1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.843927 5032 scope.go:117] "RemoveContainer" containerID="a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.848801 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.856001 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.863883 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.888607 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.899814 5032 scope.go:117] "RemoveContainer" containerID="1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.900459 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb\": container with ID starting with 1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb not found: ID does not exist" containerID="1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.900548 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb"} err="failed to get container status \"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb\": rpc error: code = NotFound desc = could not find container \"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb\": container with ID starting with 1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb not found: ID does not exist" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.900625 5032 scope.go:117] "RemoveContainer" containerID="a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.901842 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361\": container with ID starting with a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361 not found: ID does not exist" containerID="a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.901950 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361"} err="failed to get container status \"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361\": rpc error: code = NotFound desc = could not find container \"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361\": container with ID starting with a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361 not found: ID does not exist" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.902019 5032 scope.go:117] "RemoveContainer" containerID="1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.907985 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb"} err="failed to get container status \"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb\": rpc error: code = NotFound desc = could not find container \"1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb\": container with ID starting with 1f78877825a30469a24cbb40c926ac9bf321c399acd06467f621edc56bbe05cb not found: ID does not exist" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.908029 5032 scope.go:117] "RemoveContainer" containerID="a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.909502 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361"} err="failed to get container status \"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361\": rpc error: code = NotFound desc = could not find container \"a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361\": container with ID starting with a5d4e585e619df2e645ba4e99f7a2ca5e0baa199c0829e89f8f384ee7bcc7361 not found: ID does not exist" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.930012 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.930597 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="dnsmasq-dns" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.930679 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="dnsmasq-dns" Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.930738 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="init" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.930786 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="init" Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.930900 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api" Jan 22 16:24:56 crc 
kubenswrapper[5032]: I0122 16:24:56.930957 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api" Jan 22 16:24:56 crc kubenswrapper[5032]: E0122 16:24:56.931011 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api-log" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.931067 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api-log" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.931275 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.931334 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" containerName="cinder-api-log" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.931391 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb02c144-1900-4e7a-9bd3-7639134ee60a" containerName="dnsmasq-dns" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.932375 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.932542 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.940674 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.940698 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 16:24:56 crc kubenswrapper[5032]: I0122 16:24:56.940744 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.061127 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.061489 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.061714 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-scripts\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062020 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data-custom\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062159 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-logs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062288 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062426 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062580 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms8nc\" (UniqueName: \"kubernetes.io/projected/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-kube-api-access-ms8nc\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.062734 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.164786 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.164889 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-scripts\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.164938 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data-custom\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.164967 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-logs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.164991 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 
22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.165015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.165046 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms8nc\" (UniqueName: \"kubernetes.io/projected/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-kube-api-access-ms8nc\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.165082 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.165117 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.165738 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.166707 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-logs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.172245 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.172321 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.172826 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.174840 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-config-data-custom\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc 
kubenswrapper[5032]: I0122 16:24:57.188204 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-scripts\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.188466 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms8nc\" (UniqueName: \"kubernetes.io/projected/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-kube-api-access-ms8nc\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.196644 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4771dd6f-b39e-4e0a-9539-52c3b8a80db3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4771dd6f-b39e-4e0a-9539-52c3b8a80db3\") " pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.225637 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.293677 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.341228 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:24:57 crc kubenswrapper[5032]: I0122 16:24:57.846400 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 16:24:57 crc kubenswrapper[5032]: W0122 16:24:57.856252 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4771dd6f_b39e_4e0a_9539_52c3b8a80db3.slice/crio-efc5c7f82893a1de508d5fa9927e3d8b9c039e8cd5ddf93472a5fcec7e314d1e WatchSource:0}: Error finding container efc5c7f82893a1de508d5fa9927e3d8b9c039e8cd5ddf93472a5fcec7e314d1e: Status 404 returned error can't find the container with id efc5c7f82893a1de508d5fa9927e3d8b9c039e8cd5ddf93472a5fcec7e314d1e Jan 22 16:24:58 crc kubenswrapper[5032]: I0122 16:24:58.172503 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-69959c7d95-wj9bh" Jan 22 16:24:58 crc kubenswrapper[5032]: I0122 16:24:58.474544 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19c81dd6-8a6b-4029-9b0a-1b9c550ab29e" path="/var/lib/kubelet/pods/19c81dd6-8a6b-4029-9b0a-1b9c550ab29e/volumes" Jan 22 16:24:58 crc kubenswrapper[5032]: I0122 16:24:58.843057 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4771dd6f-b39e-4e0a-9539-52c3b8a80db3","Type":"ContainerStarted","Data":"b3edf1f4e98764e38041d4344cd46b8a6090530a06d4f1877ed0bfb0e2e24ea8"} Jan 22 16:24:58 crc kubenswrapper[5032]: I0122 16:24:58.843107 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4771dd6f-b39e-4e0a-9539-52c3b8a80db3","Type":"ContainerStarted","Data":"efc5c7f82893a1de508d5fa9927e3d8b9c039e8cd5ddf93472a5fcec7e314d1e"} Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.741481 5032 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.852311 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4771dd6f-b39e-4e0a-9539-52c3b8a80db3","Type":"ContainerStarted","Data":"4795e659b7acb6d8d9cba95998a065956121eecbdd6bd12397aa81ea816cd83f"} Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.853470 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.886975 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.886954996 podStartE2EDuration="3.886954996s" podCreationTimestamp="2026-01-22 16:24:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:24:59.873908255 +0000 UTC m=+1203.686077306" watchObservedRunningTime="2026-01-22 16:24:59.886954996 +0000 UTC m=+1203.699124027" Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.921444 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5fb7f99d8d-bm84s" Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.970784 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.971040 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-86dffc6c94-q7pl8" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api-log" containerID="cri-o://62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425" gracePeriod=30 Jan 22 16:24:59 crc kubenswrapper[5032]: I0122 16:24:59.971473 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-86dffc6c94-q7pl8" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api" containerID="cri-o://82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59" gracePeriod=30 Jan 22 16:25:00 crc kubenswrapper[5032]: I0122 16:25:00.860842 5032 generic.go:334] "Generic (PLEG): container finished" podID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerID="62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425" exitCode=143 Jan 22 16:25:00 crc kubenswrapper[5032]: I0122 16:25:00.860942 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerDied","Data":"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425"} Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.462105 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.463397 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.466890 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.469082 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.470704 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-gk7rr" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.473729 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.598414 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj6zk\" (UniqueName: \"kubernetes.io/projected/149da497-8a18-4333-ae25-49d36efc7538-kube-api-access-qj6zk\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.598816 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/149da497-8a18-4333-ae25-49d36efc7538-openstack-config\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.598846 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-combined-ca-bundle\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.599345 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-openstack-config-secret\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.701419 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/149da497-8a18-4333-ae25-49d36efc7538-openstack-config\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.701487 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-combined-ca-bundle\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.701532 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-openstack-config-secret\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.701596 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-qj6zk\" (UniqueName: \"kubernetes.io/projected/149da497-8a18-4333-ae25-49d36efc7538-kube-api-access-qj6zk\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.702312 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/149da497-8a18-4333-ae25-49d36efc7538-openstack-config\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.711315 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-openstack-config-secret\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.721807 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149da497-8a18-4333-ae25-49d36efc7538-combined-ca-bundle\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.722434 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj6zk\" (UniqueName: \"kubernetes.io/projected/149da497-8a18-4333-ae25-49d36efc7538-kube-api-access-qj6zk\") pod \"openstackclient\" (UID: \"149da497-8a18-4333-ae25-49d36efc7538\") " pod="openstack/openstackclient" Jan 22 16:25:01 crc kubenswrapper[5032]: I0122 16:25:01.799025 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.085537 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.158115 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.158341 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="dnsmasq-dns" containerID="cri-o://98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d" gracePeriod=10 Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.165931 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.231339 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.290484 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 16:25:02 crc kubenswrapper[5032]: W0122 16:25:02.298992 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149da497_8a18_4333_ae25_49d36efc7538.slice/crio-8fdbf6735aca8af938b42cbd803455db2d9256483005af6094f241e15c877c14 WatchSource:0}: Error finding container 8fdbf6735aca8af938b42cbd803455db2d9256483005af6094f241e15c877c14: Status 404 returned error can't find the container with id 8fdbf6735aca8af938b42cbd803455db2d9256483005af6094f241e15c877c14 Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.636031 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722002 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722046 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722082 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722145 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722173 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fsrm\" (UniqueName: \"kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.722193 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config\") pod \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\" (UID: \"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf\") " Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.728090 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm" (OuterVolumeSpecName: "kube-api-access-7fsrm") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "kube-api-access-7fsrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.785027 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config" (OuterVolumeSpecName: "config") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.786529 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.786696 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.796900 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.818425 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" (UID: "9c163df2-9ab0-4298-bd1e-22a5d51cf4bf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824100 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824132 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824143 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824153 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824162 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fsrm\" (UniqueName: \"kubernetes.io/projected/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-kube-api-access-7fsrm\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.824172 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.869805 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.879384 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-697d8b94d8-5r62k" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.900734 5032 generic.go:334] "Generic (PLEG): container finished" podID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" 
containerID="98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d" exitCode=0 Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.900799 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.900822 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" event={"ID":"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf","Type":"ContainerDied","Data":"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d"} Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.900872 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4859s" event={"ID":"9c163df2-9ab0-4298-bd1e-22a5d51cf4bf","Type":"ContainerDied","Data":"47917194ca243cb785b58e11385ef3a9dc43b4f7b11661e9657963e03c74de37"} Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.900897 5032 scope.go:117] "RemoveContainer" containerID="98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.902964 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"149da497-8a18-4333-ae25-49d36efc7538","Type":"ContainerStarted","Data":"8fdbf6735aca8af938b42cbd803455db2d9256483005af6094f241e15c877c14"} Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.903633 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="cinder-scheduler" containerID="cri-o://b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4" gracePeriod=30 Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.903711 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="probe" containerID="cri-o://8b3b552af45a101bd0309708d49b12d44ecb4b32a588fec73666ef5b9e07c86e" gracePeriod=30 Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.948332 5032 scope.go:117] "RemoveContainer" containerID="6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.950567 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.958617 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4859s"] Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.972795 5032 scope.go:117] "RemoveContainer" containerID="98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d" Jan 22 16:25:02 crc kubenswrapper[5032]: E0122 16:25:02.973327 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d\": container with ID starting with 98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d not found: ID does not exist" containerID="98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.973380 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d"} err="failed to get container status 
\"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d\": rpc error: code = NotFound desc = could not find container \"98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d\": container with ID starting with 98fd7dea7ec75bb6c1dbadcaf25a6566dc6e02aaad68c680dc90b3d767ed8b8d not found: ID does not exist" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.973410 5032 scope.go:117] "RemoveContainer" containerID="6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a" Jan 22 16:25:02 crc kubenswrapper[5032]: E0122 16:25:02.973726 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a\": container with ID starting with 6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a not found: ID does not exist" containerID="6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a" Jan 22 16:25:02 crc kubenswrapper[5032]: I0122 16:25:02.973759 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a"} err="failed to get container status \"6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a\": rpc error: code = NotFound desc = could not find container \"6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a\": container with ID starting with 6b2adea8d4aed9a765331e38cec2d6d98df051e8bbe610ae2f2f41f95cb6744a not found: ID does not exist" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.115402 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86dffc6c94-q7pl8" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:36954->10.217.0.161:9311: read: connection reset by peer" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.115421 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86dffc6c94-q7pl8" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:36958->10.217.0.161:9311: read: connection reset by peer" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.596709 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.746072 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data\") pod \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.746315 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom\") pod \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.746348 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle\") pod \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.746408 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szj4r\" (UniqueName: \"kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r\") pod \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.746487 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs\") pod \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\" (UID: \"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8\") " Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.747252 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs" (OuterVolumeSpecName: "logs") pod "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" (UID: "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.751657 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r" (OuterVolumeSpecName: "kube-api-access-szj4r") pod "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" (UID: "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8"). InnerVolumeSpecName "kube-api-access-szj4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.758762 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" (UID: "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.772199 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" (UID: "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.798710 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data" (OuterVolumeSpecName: "config-data") pod "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" (UID: "9a2633a4-b63e-4b0f-b68e-4eecbd191ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.848448 5032 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.848774 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.848786 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szj4r\" (UniqueName: \"kubernetes.io/projected/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-kube-api-access-szj4r\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.848798 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.848808 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.915791 5032 generic.go:334] "Generic (PLEG): container finished" podID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerID="8b3b552af45a101bd0309708d49b12d44ecb4b32a588fec73666ef5b9e07c86e" exitCode=0 Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.915899 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerDied","Data":"8b3b552af45a101bd0309708d49b12d44ecb4b32a588fec73666ef5b9e07c86e"} Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.920759 5032 generic.go:334] "Generic (PLEG): container finished" podID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerID="82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59" exitCode=0 Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.920800 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerDied","Data":"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59"} Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.920830 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dffc6c94-q7pl8" event={"ID":"9a2633a4-b63e-4b0f-b68e-4eecbd191ec8","Type":"ContainerDied","Data":"4bcb3da0b4aba06b6fcd489b4fd36a2bbece4c0d43fefec38d15863cb54de2cc"} Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.920852 5032 scope.go:117] "RemoveContainer" containerID="82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.920851 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86dffc6c94-q7pl8" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.959351 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.964879 5032 scope.go:117] "RemoveContainer" containerID="62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.967430 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-86dffc6c94-q7pl8"] Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.987303 5032 scope.go:117] "RemoveContainer" containerID="82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59" Jan 22 16:25:03 crc kubenswrapper[5032]: E0122 16:25:03.987780 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59\": container with ID starting with 82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59 not found: ID does not exist" containerID="82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.987822 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59"} err="failed to get container status \"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59\": rpc error: code = NotFound desc = could not find container \"82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59\": container with ID starting with 82441c147dd00d15bd56cc20360440f28e5153cd29d93c78892716f12642bf59 not found: ID does not exist" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.987851 5032 scope.go:117] "RemoveContainer" containerID="62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425" Jan 22 16:25:03 crc kubenswrapper[5032]: E0122 16:25:03.988757 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425\": container with ID starting with 62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425 not found: ID does not exist" containerID="62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425" Jan 22 16:25:03 crc kubenswrapper[5032]: I0122 16:25:03.988784 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425"} err="failed to get container status \"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425\": rpc error: code = NotFound desc = could not find container \"62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425\": container with ID starting with 62631f421e032358bad3f59f19cb55415456561c69446dd77cb86487da0e7425 not found: ID does not exist" Jan 22 16:25:04 crc kubenswrapper[5032]: I0122 16:25:04.465349 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" path="/var/lib/kubelet/pods/9a2633a4-b63e-4b0f-b68e-4eecbd191ec8/volumes" Jan 22 16:25:04 crc kubenswrapper[5032]: I0122 16:25:04.467181 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" path="/var/lib/kubelet/pods/9c163df2-9ab0-4298-bd1e-22a5d51cf4bf/volumes" 
Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.120945 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.121687 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" containerID="cri-o://1be1fd44808132a7c830f0d2e5e400347571c06168780584b7a84a16f9b1aade" gracePeriod=30 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.121790 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="sg-core" containerID="cri-o://d5847f0b2261e34359b1d0180291fa0609016e101e77c39044ab690dd2faba90" gracePeriod=30 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.121831 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-notification-agent" containerID="cri-o://10cb1dd6a0c799da430b5ffa57aa888a5c6df142f4409db35c8742c4b0a1c358" gracePeriod=30 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.121445 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-central-agent" containerID="cri-o://51350ee0ec801a4d0f008e7d8bdf969629526afe6232f2183db7e6682239384f" gracePeriod=30 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.134209 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.156:3000/\": EOF" Jan 22 16:25:05 crc kubenswrapper[5032]: E0122 16:25:05.697531 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d674820_800a_4969_ad68_93a53f9d3ad5.slice/crio-conmon-b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.966273 5032 generic.go:334] "Generic (PLEG): container finished" podID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerID="b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4" exitCode=0 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.966354 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerDied","Data":"b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4"} Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981365 5032 generic.go:334] "Generic (PLEG): container finished" podID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerID="1be1fd44808132a7c830f0d2e5e400347571c06168780584b7a84a16f9b1aade" exitCode=0 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981394 5032 generic.go:334] "Generic (PLEG): container finished" podID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerID="d5847f0b2261e34359b1d0180291fa0609016e101e77c39044ab690dd2faba90" exitCode=2 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981403 5032 generic.go:334] "Generic (PLEG): container finished" podID="4277b5eb-5f14-47db-801a-b7b2356253a9" 
containerID="51350ee0ec801a4d0f008e7d8bdf969629526afe6232f2183db7e6682239384f" exitCode=0 Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981432 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerDied","Data":"1be1fd44808132a7c830f0d2e5e400347571c06168780584b7a84a16f9b1aade"} Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981457 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerDied","Data":"d5847f0b2261e34359b1d0180291fa0609016e101e77c39044ab690dd2faba90"} Jan 22 16:25:05 crc kubenswrapper[5032]: I0122 16:25:05.981467 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerDied","Data":"51350ee0ec801a4d0f008e7d8bdf969629526afe6232f2183db7e6682239384f"} Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.030027 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188517 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188682 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zt4m5\" (UniqueName: \"kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188732 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188749 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.188772 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts\") pod \"3d674820-800a-4969-ad68-93a53f9d3ad5\" (UID: \"3d674820-800a-4969-ad68-93a53f9d3ad5\") " Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.189821 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod 
"3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.196194 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5" (OuterVolumeSpecName: "kube-api-access-zt4m5") pod "3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "kube-api-access-zt4m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.196550 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.200809 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts" (OuterVolumeSpecName: "scripts") pod "3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.245825 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.283815 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data" (OuterVolumeSpecName: "config-data") pod "3d674820-800a-4969-ad68-93a53f9d3ad5" (UID: "3d674820-800a-4969-ad68-93a53f9d3ad5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.290948 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.290980 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zt4m5\" (UniqueName: \"kubernetes.io/projected/3d674820-800a-4969-ad68-93a53f9d3ad5-kube-api-access-zt4m5\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.290990 5032 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3d674820-800a-4969-ad68-93a53f9d3ad5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.290998 5032 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.291006 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:06 crc kubenswrapper[5032]: I0122 16:25:06.291015 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d674820-800a-4969-ad68-93a53f9d3ad5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.010412 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3d674820-800a-4969-ad68-93a53f9d3ad5","Type":"ContainerDied","Data":"ff15fc56f7b9ab77ba14a68c7dc1eeb90addf650705d0d0f946b0c58c54f7e4d"} Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.010749 5032 scope.go:117] "RemoveContainer" containerID="8b3b552af45a101bd0309708d49b12d44ecb4b32a588fec73666ef5b9e07c86e" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.010552 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.038320 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.047145 5032 scope.go:117] "RemoveContainer" containerID="b166be98ee151d10675b10c7940dd2f4f79dbc1b2cca42f2ed8068a492ddd0b4" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.051295 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.068590 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.069461 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="init" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.069629 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="init" Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.069764 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.069924 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api" Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.070083 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="probe" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.070187 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="probe" Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.070297 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="cinder-scheduler" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.070399 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="cinder-scheduler" Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.070506 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api-log" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.070616 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api-log" Jan 22 16:25:07 crc kubenswrapper[5032]: E0122 16:25:07.070720 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="dnsmasq-dns" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.070843 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="dnsmasq-dns" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.071303 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="probe" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.071426 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" containerName="cinder-scheduler" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.071549 5032 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9c163df2-9ab0-4298-bd1e-22a5d51cf4bf" containerName="dnsmasq-dns" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.071698 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api-log" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.071825 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a2633a4-b63e-4b0f-b68e-4eecbd191ec8" containerName="barbican-api" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.073482 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.076292 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.092617 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211265 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211337 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e831a62a-4003-41ec-b3ac-ba0d29a91d95-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211482 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211561 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wqpv\" (UniqueName: \"kubernetes.io/projected/e831a62a-4003-41ec-b3ac-ba0d29a91d95-kube-api-access-6wqpv\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211714 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.211888 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-scripts\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.219276 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bf69fb-rdlps" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" 
probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.219405 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.282094 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-rvk27"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.283184 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.289673 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rvk27"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313403 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313477 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-scripts\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313523 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313551 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e831a62a-4003-41ec-b3ac-ba0d29a91d95-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313584 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.313606 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wqpv\" (UniqueName: \"kubernetes.io/projected/e831a62a-4003-41ec-b3ac-ba0d29a91d95-kube-api-access-6wqpv\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.314982 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e831a62a-4003-41ec-b3ac-ba0d29a91d95-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.318379 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-scripts\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.318844 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.319878 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.327207 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e831a62a-4003-41ec-b3ac-ba0d29a91d95-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.328161 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wqpv\" (UniqueName: \"kubernetes.io/projected/e831a62a-4003-41ec-b3ac-ba0d29a91d95-kube-api-access-6wqpv\") pod \"cinder-scheduler-0\" (UID: \"e831a62a-4003-41ec-b3ac-ba0d29a91d95\") " pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.371065 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-vpdhx"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.372118 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.392843 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-3697-account-create-update-mdn8x"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.393978 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.401112 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.406909 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.415279 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-vpdhx"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.417190 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.417346 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl5qr\" (UniqueName: \"kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.444034 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3697-account-create-update-mdn8x"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.518910 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts\") pod \"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.519290 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl5qr\" (UniqueName: \"kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.519360 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npvpv\" (UniqueName: \"kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv\") pod \"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.519404 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts\") pod \"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.519436 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.519485 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj94w\" (UniqueName: \"kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w\") pod 
\"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.520486 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.562005 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl5qr\" (UniqueName: \"kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr\") pod \"nova-api-db-create-rvk27\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.581920 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dq8h2"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.583048 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.597940 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ad31-account-create-update-lgnzz"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.599075 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.611834 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dq8h2"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.611940 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.615468 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ad31-account-create-update-lgnzz"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.616938 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.620526 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj94w\" (UniqueName: \"kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w\") pod \"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.620616 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts\") pod \"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.620744 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npvpv\" (UniqueName: \"kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv\") pod \"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.620817 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts\") pod \"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.623530 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts\") pod \"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.624220 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts\") pod \"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.646378 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj94w\" (UniqueName: \"kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w\") pod \"nova-api-3697-account-create-update-mdn8x\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.649847 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npvpv\" (UniqueName: \"kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv\") pod 
\"nova-cell0-db-create-vpdhx\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.722074 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.722113 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tscrp\" (UniqueName: \"kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.722224 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq5fr\" (UniqueName: \"kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.722258 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.767105 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-a042-account-create-update-zrqtx"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.771346 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.774418 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.787195 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a042-account-create-update-zrqtx"] Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.824798 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq5fr\" (UniqueName: \"kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.825149 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.825247 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.825310 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tscrp\" (UniqueName: \"kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.826648 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.827311 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.831247 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.838092 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.846261 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tscrp\" (UniqueName: \"kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp\") pod \"nova-cell1-db-create-dq8h2\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.846959 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq5fr\" (UniqueName: \"kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr\") pod \"nova-cell0-ad31-account-create-update-lgnzz\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.906633 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.927297 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmzzq\" (UniqueName: \"kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.927372 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:07 crc kubenswrapper[5032]: I0122 16:25:07.956409 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.030168 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmzzq\" (UniqueName: \"kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.030233 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.030973 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.036725 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.056547 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmzzq\" (UniqueName: \"kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq\") pod \"nova-cell1-a042-account-create-update-zrqtx\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.104041 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.182284 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rvk27"] Jan 22 16:25:08 crc kubenswrapper[5032]: I0122 16:25:08.472758 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d674820-800a-4969-ad68-93a53f9d3ad5" path="/var/lib/kubelet/pods/3d674820-800a-4969-ad68-93a53f9d3ad5/volumes" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.050161 5032 generic.go:334] "Generic (PLEG): container finished" podID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerID="10cb1dd6a0c799da430b5ffa57aa888a5c6df142f4409db35c8742c4b0a1c358" exitCode=0 Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.050225 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerDied","Data":"10cb1dd6a0c799da430b5ffa57aa888a5c6df142f4409db35c8742c4b0a1c358"} Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.311782 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-659fb8687c-zp7xh"] Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.313810 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.317250 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.320543 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.325260 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.342198 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-659fb8687c-zp7xh"] Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455385 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-run-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455478 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-internal-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455532 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-config-data\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455675 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-combined-ca-bundle\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455788 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ss8s\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-kube-api-access-4ss8s\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.455987 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-log-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.456117 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-etc-swift\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " 
pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.456150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-public-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558050 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-run-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558121 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-internal-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558147 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-config-data\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558169 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-combined-ca-bundle\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558197 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ss8s\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-kube-api-access-4ss8s\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558223 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-log-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558259 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-etc-swift\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558272 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-public-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" 
Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.558663 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-run-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.565497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-internal-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.565780 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-etc-swift\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.565867 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-combined-ca-bundle\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.568584 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-public-tls-certs\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.569672 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7356e680-8b67-44b4-8cff-ea118c682c66-config-data\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.576288 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7356e680-8b67-44b4-8cff-ea118c682c66-log-httpd\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.578777 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ss8s\" (UniqueName: \"kubernetes.io/projected/7356e680-8b67-44b4-8cff-ea118c682c66-kube-api-access-4ss8s\") pod \"swift-proxy-659fb8687c-zp7xh\" (UID: \"7356e680-8b67-44b4-8cff-ea118c682c66\") " pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.628232 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 22 16:25:09 crc kubenswrapper[5032]: I0122 16:25:09.658349 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:10 crc kubenswrapper[5032]: I0122 16:25:10.819708 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.084463 5032 generic.go:334] "Generic (PLEG): container finished" podID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerID="1f79f0f2126156b0ebaf59b79d17e3c6eec85d2cf121f7c2ba7096f5f7293ffe" exitCode=137 Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.084529 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerDied","Data":"1f79f0f2126156b0ebaf59b79d17e3c6eec85d2cf121f7c2ba7096f5f7293ffe"} Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.094199 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.156:3000/\": dial tcp 10.217.0.156:3000: connect: connection refused" Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.184603 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c9dff9df7-cgg9f" Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.257545 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.262246 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-66dc46f64-9gscp" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-api" containerID="cri-o://b01d939fa86219beeb099fc1a3edd0a27d8c44f7ca0eda96a010c4caa7b6ed24" gracePeriod=30 Jan 22 16:25:13 crc kubenswrapper[5032]: I0122 16:25:13.262420 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-66dc46f64-9gscp" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-httpd" containerID="cri-o://ebf3523e15ede3ae09eec3521bbaf0ef23ae461e03d369a1954e332476314201" gracePeriod=30 Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.105179 5032 generic.go:334] "Generic (PLEG): container finished" podID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerID="ebf3523e15ede3ae09eec3521bbaf0ef23ae461e03d369a1954e332476314201" exitCode=0 Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.105241 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerDied","Data":"ebf3523e15ede3ae09eec3521bbaf0ef23ae461e03d369a1954e332476314201"} Jan 22 16:25:15 crc kubenswrapper[5032]: W0122 16:25:15.138132 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode831a62a_4003_41ec_b3ac_ba0d29a91d95.slice/crio-36db9309a6b6f90b9bad114c59601b688cb4c145b0ae206e48bfed19954c6665 WatchSource:0}: Error finding container 36db9309a6b6f90b9bad114c59601b688cb4c145b0ae206e48bfed19954c6665: Status 404 returned error can't find the container with id 36db9309a6b6f90b9bad114c59601b688cb4c145b0ae206e48bfed19954c6665 Jan 22 16:25:15 crc kubenswrapper[5032]: W0122 16:25:15.139504 5032 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4994a5dc_21af_4f0d_a705_0a8b2f8c26bc.slice/crio-ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a WatchSource:0}: Error finding container ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a: Status 404 returned error can't find the container with id ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.701031 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.785941 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.786012 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.787420 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw7pp\" (UniqueName: \"kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.787461 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.787496 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.787551 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.787599 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle\") pod \"a8bf326d-2513-4e16-8c33-c23fa702de8d\" (UID: \"a8bf326d-2513-4e16-8c33-c23fa702de8d\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.789149 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs" (OuterVolumeSpecName: "logs") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.799044 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.799580 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp" (OuterVolumeSpecName: "kube-api-access-cw7pp") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "kube-api-access-cw7pp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.815684 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.827450 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts" (OuterVolumeSpecName: "scripts") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.827522 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.856887 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data" (OuterVolumeSpecName: "config-data") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.889773 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.889849 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.889907 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.889932 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.889992 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrj84\" (UniqueName: \"kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890042 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890063 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd\") pod \"4277b5eb-5f14-47db-801a-b7b2356253a9\" (UID: \"4277b5eb-5f14-47db-801a-b7b2356253a9\") " Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890489 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890501 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a8bf326d-2513-4e16-8c33-c23fa702de8d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890510 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw7pp\" (UniqueName: \"kubernetes.io/projected/a8bf326d-2513-4e16-8c33-c23fa702de8d-kube-api-access-cw7pp\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890521 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8bf326d-2513-4e16-8c33-c23fa702de8d-logs\") on node \"crc\" DevicePath 
\"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890530 5032 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890540 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.890781 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.891187 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.917935 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "a8bf326d-2513-4e16-8c33-c23fa702de8d" (UID: "a8bf326d-2513-4e16-8c33-c23fa702de8d"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.917972 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts" (OuterVolumeSpecName: "scripts") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.932387 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84" (OuterVolumeSpecName: "kube-api-access-rrj84") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "kube-api-access-rrj84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.986454 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-659fb8687c-zp7xh"] Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.992445 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.992473 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.992486 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrj84\" (UniqueName: \"kubernetes.io/projected/4277b5eb-5f14-47db-801a-b7b2356253a9-kube-api-access-rrj84\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.992500 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4277b5eb-5f14-47db-801a-b7b2356253a9-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:15 crc kubenswrapper[5032]: I0122 16:25:15.992510 5032 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8bf326d-2513-4e16-8c33-c23fa702de8d-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.002439 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.027810 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data" (OuterVolumeSpecName: "config-data") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.052054 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4277b5eb-5f14-47db-801a-b7b2356253a9" (UID: "4277b5eb-5f14-47db-801a-b7b2356253a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.095923 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.095955 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.095965 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4277b5eb-5f14-47db-801a-b7b2356253a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.121134 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"149da497-8a18-4333-ae25-49d36efc7538","Type":"ContainerStarted","Data":"4c59275b7fd174327dd9c28209d6c02fd7da4eef73bf7b8c013aff2faa06b64f"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.129719 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rvk27" event={"ID":"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc","Type":"ContainerStarted","Data":"c7c7ce6416591cd56092d334e386f19884997d315b89732143ae16acd4ecfa2f"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.129755 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rvk27" event={"ID":"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc","Type":"ContainerStarted","Data":"ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.144991 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-659fb8687c-zp7xh" event={"ID":"7356e680-8b67-44b4-8cff-ea118c682c66","Type":"ContainerStarted","Data":"21f4c93030d5c19c27f34019839a771d2cd3fc27dd08bb51155621cad89ada26"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.159323 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-rvk27" podStartSLOduration=9.15930593 podStartE2EDuration="9.15930593s" podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:16.152842146 +0000 UTC m=+1219.965011177" watchObservedRunningTime="2026-01-22 16:25:16.15930593 +0000 UTC m=+1219.971474961" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.160305 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4277b5eb-5f14-47db-801a-b7b2356253a9","Type":"ContainerDied","Data":"68b662703be8dfb70cab672ebeec28f57020bfb6be8969be62c90c9c0dbd6028"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.160436 5032 scope.go:117] "RemoveContainer" containerID="1be1fd44808132a7c830f0d2e5e400347571c06168780584b7a84a16f9b1aade" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.160650 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.197669 5032 generic.go:334] "Generic (PLEG): container finished" podID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerID="b01d939fa86219beeb099fc1a3edd0a27d8c44f7ca0eda96a010c4caa7b6ed24" exitCode=0 Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.197736 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerDied","Data":"b01d939fa86219beeb099fc1a3edd0a27d8c44f7ca0eda96a010c4caa7b6ed24"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.200808 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bf69fb-rdlps" event={"ID":"a8bf326d-2513-4e16-8c33-c23fa702de8d","Type":"ContainerDied","Data":"e5613f8edd73a8e2de1177bd8f475dd6a766cddd6141b3960c72ce0e0144ddc6"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.201013 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68bf69fb-rdlps" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.223317 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e831a62a-4003-41ec-b3ac-ba0d29a91d95","Type":"ContainerStarted","Data":"36db9309a6b6f90b9bad114c59601b688cb4c145b0ae206e48bfed19954c6665"} Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.282053 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ad31-account-create-update-lgnzz"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.291751 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dq8h2"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.301666 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3697-account-create-update-mdn8x"] Jan 22 16:25:16 crc kubenswrapper[5032]: W0122 16:25:16.350992 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b900cbd_8e74_4b76_8bb6_99c355ac6ebd.slice/crio-2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071 WatchSource:0}: Error finding container 2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071: Status 404 returned error can't find the container with id 2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071 Jan 22 16:25:16 crc kubenswrapper[5032]: W0122 16:25:16.353735 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa13677f_87b8_46f1_85fa_07b79dfd6ac8.slice/crio-95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4 WatchSource:0}: Error finding container 95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4: Status 404 returned error can't find the container with id 95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4 Jan 22 16:25:16 crc kubenswrapper[5032]: W0122 16:25:16.361826 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8655adea_0114_43b1_a101_47535eda5bc5.slice/crio-cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353 WatchSource:0}: Error finding container cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353: Status 404 returned error can't find the container with id cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353 
Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.437189 5032 scope.go:117] "RemoveContainer" containerID="d5847f0b2261e34359b1d0180291fa0609016e101e77c39044ab690dd2faba90" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.531790 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a042-account-create-update-zrqtx"] Jan 22 16:25:16 crc kubenswrapper[5032]: W0122 16:25:16.542190 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec68387f_04fc_4330_9b84_233dc14a5835.slice/crio-59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8 WatchSource:0}: Error finding container 59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8: Status 404 returned error can't find the container with id 59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8 Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.550711 5032 scope.go:117] "RemoveContainer" containerID="10cb1dd6a0c799da430b5ffa57aa888a5c6df142f4409db35c8742c4b0a1c358" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.554714 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-vpdhx"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.591096 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.594618 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.634363 5032 scope.go:117] "RemoveContainer" containerID="51350ee0ec801a4d0f008e7d8bdf969629526afe6232f2183db7e6682239384f" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.644682 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68bf69fb-rdlps"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.676286 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.683959 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.694923 5032 scope.go:117] "RemoveContainer" containerID="c19e7c4f249df844077ca639ddf4702d340f082798870e59ea1c0a5891e3d050" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700142 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700609 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-central-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700624 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-central-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700638 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-notification-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700644 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-notification-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700664 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" 
containerName="neutron-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700670 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700685 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700692 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700701 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-api" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700706 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-api" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700715 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="sg-core" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700721 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="sg-core" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700730 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon-log" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700735 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon-log" Jan 22 16:25:16 crc kubenswrapper[5032]: E0122 16:25:16.700749 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700754 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.700999 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701011 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="sg-core" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701025 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-central-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701048 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" containerName="neutron-api" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701058 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="ceilometer-notification-agent" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701070 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" containerName="horizon-log" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701080 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" 
containerName="neutron-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.701087 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" containerName="proxy-httpd" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.702633 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.704792 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.705275 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.708285 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.717647 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config\") pod \"ff349c82-6063-48d6-9d1f-d5075e2163b5\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.717677 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle\") pod \"ff349c82-6063-48d6-9d1f-d5075e2163b5\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.717702 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs\") pod \"ff349c82-6063-48d6-9d1f-d5075e2163b5\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.717809 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lkcp\" (UniqueName: \"kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp\") pod \"ff349c82-6063-48d6-9d1f-d5075e2163b5\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.717876 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config\") pod \"ff349c82-6063-48d6-9d1f-d5075e2163b5\" (UID: \"ff349c82-6063-48d6-9d1f-d5075e2163b5\") " Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.741309 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp" (OuterVolumeSpecName: "kube-api-access-9lkcp") pod "ff349c82-6063-48d6-9d1f-d5075e2163b5" (UID: "ff349c82-6063-48d6-9d1f-d5075e2163b5"). InnerVolumeSpecName "kube-api-access-9lkcp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.760460 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ff349c82-6063-48d6-9d1f-d5075e2163b5" (UID: "ff349c82-6063-48d6-9d1f-d5075e2163b5"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819552 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819619 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819639 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819688 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pscwk\" (UniqueName: \"kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819766 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819791 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.819843 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.820088 5032 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.820102 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lkcp\" (UniqueName: \"kubernetes.io/projected/ff349c82-6063-48d6-9d1f-d5075e2163b5-kube-api-access-9lkcp\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.856308 5032 scope.go:117] "RemoveContainer" containerID="1f79f0f2126156b0ebaf59b79d17e3c6eec85d2cf121f7c2ba7096f5f7293ffe" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.912591 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config" (OuterVolumeSpecName: "config") pod "ff349c82-6063-48d6-9d1f-d5075e2163b5" (UID: "ff349c82-6063-48d6-9d1f-d5075e2163b5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921382 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921446 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921533 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pscwk\" (UniqueName: \"kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921594 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921612 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921683 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921757 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.921897 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.922040 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.922465 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.927218 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.927419 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.927664 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.932626 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.940577 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pscwk\" (UniqueName: \"kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk\") pod \"ceilometer-0\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " pod="openstack/ceilometer-0" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.940953 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff349c82-6063-48d6-9d1f-d5075e2163b5" (UID: "ff349c82-6063-48d6-9d1f-d5075e2163b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:16 crc kubenswrapper[5032]: I0122 16:25:16.964031 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ff349c82-6063-48d6-9d1f-d5075e2163b5" (UID: "ff349c82-6063-48d6-9d1f-d5075e2163b5"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.023392 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.023438 5032 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff349c82-6063-48d6-9d1f-d5075e2163b5-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.024158 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.281047 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e831a62a-4003-41ec-b3ac-ba0d29a91d95","Type":"ContainerStarted","Data":"392346e95511f9e773c8a4027153ea1358a1abe20ee1967ad00faf07032136e6"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.283375 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3697-account-create-update-mdn8x" event={"ID":"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd","Type":"ContainerStarted","Data":"852d664b9848105063e60d29ac3c01239ccb746805d76676e0183d98c236a3da"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.283402 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3697-account-create-update-mdn8x" event={"ID":"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd","Type":"ContainerStarted","Data":"2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.288561 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" event={"ID":"aa13677f-87b8-46f1-85fa-07b79dfd6ac8","Type":"ContainerStarted","Data":"f6487cef5f478908a3389988e4e0bc886923c3dc838968d8f65b0a6367d12a0a"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.288594 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" event={"ID":"aa13677f-87b8-46f1-85fa-07b79dfd6ac8","Type":"ContainerStarted","Data":"95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.311357 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vpdhx" event={"ID":"37939e63-b34f-4b59-bebe-b99b19dd1b5f","Type":"ContainerStarted","Data":"73c5ea290912fe47dd080acfe335601d0fa52ebce0753bb288d78e30b55a73f8"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.311430 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vpdhx" event={"ID":"37939e63-b34f-4b59-bebe-b99b19dd1b5f","Type":"ContainerStarted","Data":"aeff63d2e8cd089643bb727e2211a10587fc98be55b9c826d3913c3852b65d88"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.329390 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-659fb8687c-zp7xh" event={"ID":"7356e680-8b67-44b4-8cff-ea118c682c66","Type":"ContainerStarted","Data":"30daaae282a795c64582f4613969db5203f45cfd7028f5a699da42307b9ffa41"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.329451 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-659fb8687c-zp7xh" event={"ID":"7356e680-8b67-44b4-8cff-ea118c682c66","Type":"ContainerStarted","Data":"9034dd3233225f52ce17cde92869a69ae62eaf05a8564f4d333ea21decbd89fe"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.330357 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.331266 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.336274 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-3697-account-create-update-mdn8x" podStartSLOduration=10.336237778 podStartE2EDuration="10.336237778s" 
podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:17.298185562 +0000 UTC m=+1221.110354593" watchObservedRunningTime="2026-01-22 16:25:17.336237778 +0000 UTC m=+1221.148406809" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.338823 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" event={"ID":"ec68387f-04fc-4330-9b84-233dc14a5835","Type":"ContainerStarted","Data":"93196dcce23769bcc71bea177c91a1481839ceca3bd0c4ab6b6ba303de2321e9"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.338877 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" event={"ID":"ec68387f-04fc-4330-9b84-233dc14a5835","Type":"ContainerStarted","Data":"59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.348956 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" podStartSLOduration=10.348943091 podStartE2EDuration="10.348943091s" podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:17.317498403 +0000 UTC m=+1221.129667434" watchObservedRunningTime="2026-01-22 16:25:17.348943091 +0000 UTC m=+1221.161112122" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.357528 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dq8h2" event={"ID":"8655adea-0114-43b1-a101-47535eda5bc5","Type":"ContainerStarted","Data":"6a3627df4bdb5b3993ef997e2e469602d28ae59018f0d2fe9c7c3168933e28f1"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.357566 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dq8h2" event={"ID":"8655adea-0114-43b1-a101-47535eda5bc5","Type":"ContainerStarted","Data":"cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.386264 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66dc46f64-9gscp" event={"ID":"ff349c82-6063-48d6-9d1f-d5075e2163b5","Type":"ContainerDied","Data":"aa08fd3e88822cf303c48572dd23545d389e889bb2de8caedf4cb3a36178b654"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.386627 5032 scope.go:117] "RemoveContainer" containerID="ebf3523e15ede3ae09eec3521bbaf0ef23ae461e03d369a1954e332476314201" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.387018 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-66dc46f64-9gscp" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.404267 5032 generic.go:334] "Generic (PLEG): container finished" podID="4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" containerID="c7c7ce6416591cd56092d334e386f19884997d315b89732143ae16acd4ecfa2f" exitCode=0 Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.404346 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rvk27" event={"ID":"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc","Type":"ContainerDied","Data":"c7c7ce6416591cd56092d334e386f19884997d315b89732143ae16acd4ecfa2f"} Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.410677 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-vpdhx" podStartSLOduration=10.410658785 podStartE2EDuration="10.410658785s" podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:17.333908025 +0000 UTC m=+1221.146077066" watchObservedRunningTime="2026-01-22 16:25:17.410658785 +0000 UTC m=+1221.222827816" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.415648 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-659fb8687c-zp7xh" podStartSLOduration=8.415474555 podStartE2EDuration="8.415474555s" podCreationTimestamp="2026-01-22 16:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:17.356791663 +0000 UTC m=+1221.168960694" watchObservedRunningTime="2026-01-22 16:25:17.415474555 +0000 UTC m=+1221.227643596" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.444210 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" podStartSLOduration=10.444188539 podStartE2EDuration="10.444188539s" podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:17.374622493 +0000 UTC m=+1221.186791524" watchObservedRunningTime="2026-01-22 16:25:17.444188539 +0000 UTC m=+1221.256357570" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.478561 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.120648388 podStartE2EDuration="16.478531936s" podCreationTimestamp="2026-01-22 16:25:01 +0000 UTC" firstStartedPulling="2026-01-22 16:25:02.304659296 +0000 UTC m=+1206.116828327" lastFinishedPulling="2026-01-22 16:25:15.662542834 +0000 UTC m=+1219.474711875" observedRunningTime="2026-01-22 16:25:17.433988934 +0000 UTC m=+1221.246157965" watchObservedRunningTime="2026-01-22 16:25:17.478531936 +0000 UTC m=+1221.290700967" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.556808 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:17 crc kubenswrapper[5032]: W0122 16:25:17.560823 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podceac2bce_8113_4730_a763_80e0e0395b75.slice/crio-a59bd620116e7e5f9faefd1fcf1b811c1ab7b01e5fddcfab933dc77e0eee531e WatchSource:0}: Error finding container a59bd620116e7e5f9faefd1fcf1b811c1ab7b01e5fddcfab933dc77e0eee531e: Status 404 returned 
error can't find the container with id a59bd620116e7e5f9faefd1fcf1b811c1ab7b01e5fddcfab933dc77e0eee531e Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.569924 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.570960 5032 scope.go:117] "RemoveContainer" containerID="b01d939fa86219beeb099fc1a3edd0a27d8c44f7ca0eda96a010c4caa7b6ed24" Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.580237 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-66dc46f64-9gscp"] Jan 22 16:25:17 crc kubenswrapper[5032]: I0122 16:25:17.837567 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.424533 5032 generic.go:334] "Generic (PLEG): container finished" podID="7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" containerID="852d664b9848105063e60d29ac3c01239ccb746805d76676e0183d98c236a3da" exitCode=0 Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.424798 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3697-account-create-update-mdn8x" event={"ID":"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd","Type":"ContainerDied","Data":"852d664b9848105063e60d29ac3c01239ccb746805d76676e0183d98c236a3da"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.431530 5032 generic.go:334] "Generic (PLEG): container finished" podID="aa13677f-87b8-46f1-85fa-07b79dfd6ac8" containerID="f6487cef5f478908a3389988e4e0bc886923c3dc838968d8f65b0a6367d12a0a" exitCode=0 Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.431671 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" event={"ID":"aa13677f-87b8-46f1-85fa-07b79dfd6ac8","Type":"ContainerDied","Data":"f6487cef5f478908a3389988e4e0bc886923c3dc838968d8f65b0a6367d12a0a"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.441402 5032 generic.go:334] "Generic (PLEG): container finished" podID="8655adea-0114-43b1-a101-47535eda5bc5" containerID="6a3627df4bdb5b3993ef997e2e469602d28ae59018f0d2fe9c7c3168933e28f1" exitCode=0 Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.441479 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dq8h2" event={"ID":"8655adea-0114-43b1-a101-47535eda5bc5","Type":"ContainerDied","Data":"6a3627df4bdb5b3993ef997e2e469602d28ae59018f0d2fe9c7c3168933e28f1"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.455495 5032 generic.go:334] "Generic (PLEG): container finished" podID="ec68387f-04fc-4330-9b84-233dc14a5835" containerID="93196dcce23769bcc71bea177c91a1481839ceca3bd0c4ab6b6ba303de2321e9" exitCode=0 Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.482518 5032 generic.go:334] "Generic (PLEG): container finished" podID="37939e63-b34f-4b59-bebe-b99b19dd1b5f" containerID="73c5ea290912fe47dd080acfe335601d0fa52ebce0753bb288d78e30b55a73f8" exitCode=0 Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.493351 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4277b5eb-5f14-47db-801a-b7b2356253a9" path="/var/lib/kubelet/pods/4277b5eb-5f14-47db-801a-b7b2356253a9/volumes" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.494155 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8bf326d-2513-4e16-8c33-c23fa702de8d" path="/var/lib/kubelet/pods/a8bf326d-2513-4e16-8c33-c23fa702de8d/volumes" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.495201 
5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff349c82-6063-48d6-9d1f-d5075e2163b5" path="/var/lib/kubelet/pods/ff349c82-6063-48d6-9d1f-d5075e2163b5/volumes" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.496079 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" event={"ID":"ec68387f-04fc-4330-9b84-233dc14a5835","Type":"ContainerDied","Data":"93196dcce23769bcc71bea177c91a1481839ceca3bd0c4ab6b6ba303de2321e9"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.496167 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vpdhx" event={"ID":"37939e63-b34f-4b59-bebe-b99b19dd1b5f","Type":"ContainerDied","Data":"73c5ea290912fe47dd080acfe335601d0fa52ebce0753bb288d78e30b55a73f8"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.504274 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerStarted","Data":"a59bd620116e7e5f9faefd1fcf1b811c1ab7b01e5fddcfab933dc77e0eee531e"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.515987 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e831a62a-4003-41ec-b3ac-ba0d29a91d95","Type":"ContainerStarted","Data":"2dc965df082194facbfa488f1a958ece1173ac0ecf4b8ba5485306432ee7eef6"} Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.911256 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.921574 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.972046 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tscrp\" (UniqueName: \"kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp\") pod \"8655adea-0114-43b1-a101-47535eda5bc5\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.972105 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts\") pod \"8655adea-0114-43b1-a101-47535eda5bc5\" (UID: \"8655adea-0114-43b1-a101-47535eda5bc5\") " Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.972135 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl5qr\" (UniqueName: \"kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr\") pod \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.972208 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts\") pod \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\" (UID: \"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc\") " Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.973149 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"8655adea-0114-43b1-a101-47535eda5bc5" (UID: "8655adea-0114-43b1-a101-47535eda5bc5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.973507 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" (UID: "4994a5dc-21af-4f0d-a705-0a8b2f8c26bc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.987433 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp" (OuterVolumeSpecName: "kube-api-access-tscrp") pod "8655adea-0114-43b1-a101-47535eda5bc5" (UID: "8655adea-0114-43b1-a101-47535eda5bc5"). InnerVolumeSpecName "kube-api-access-tscrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:18 crc kubenswrapper[5032]: I0122 16:25:18.990179 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr" (OuterVolumeSpecName: "kube-api-access-xl5qr") pod "4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" (UID: "4994a5dc-21af-4f0d-a705-0a8b2f8c26bc"). InnerVolumeSpecName "kube-api-access-xl5qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.074590 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tscrp\" (UniqueName: \"kubernetes.io/projected/8655adea-0114-43b1-a101-47535eda5bc5-kube-api-access-tscrp\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.074621 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8655adea-0114-43b1-a101-47535eda5bc5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.074629 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl5qr\" (UniqueName: \"kubernetes.io/projected/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-kube-api-access-xl5qr\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.074638 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.530787 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rvk27" event={"ID":"4994a5dc-21af-4f0d-a705-0a8b2f8c26bc","Type":"ContainerDied","Data":"ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a"} Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.531225 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab8f822f710b9ee635d21e8db5bc1e9aa3af642f11e52d31b7e4eb3ff3c57d4a" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.530841 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rvk27" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.534569 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dq8h2" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.534645 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dq8h2" event={"ID":"8655adea-0114-43b1-a101-47535eda5bc5","Type":"ContainerDied","Data":"cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353"} Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.534702 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cced111fa6cbe13d3bc1363cd364e27203316709cfcccd05ad735fe3d3e2e353" Jan 22 16:25:19 crc kubenswrapper[5032]: I0122 16:25:19.565933 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=12.565920108 podStartE2EDuration="12.565920108s" podCreationTimestamp="2026-01-22 16:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:19.563237285 +0000 UTC m=+1223.375406316" watchObservedRunningTime="2026-01-22 16:25:19.565920108 +0000 UTC m=+1223.378089139" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.125924 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.130378 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.136820 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.178952 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206097 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npvpv\" (UniqueName: \"kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv\") pod \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206188 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmzzq\" (UniqueName: \"kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq\") pod \"ec68387f-04fc-4330-9b84-233dc14a5835\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206247 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts\") pod \"ec68387f-04fc-4330-9b84-233dc14a5835\" (UID: \"ec68387f-04fc-4330-9b84-233dc14a5835\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206268 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts\") pod \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206382 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts\") pod \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\" (UID: \"37939e63-b34f-4b59-bebe-b99b19dd1b5f\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206409 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj94w\" (UniqueName: \"kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w\") pod \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\" (UID: \"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.206794 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ec68387f-04fc-4330-9b84-233dc14a5835" (UID: "ec68387f-04fc-4330-9b84-233dc14a5835"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.207014 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" (UID: "7b900cbd-8e74-4b76-8bb6-99c355ac6ebd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.207054 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37939e63-b34f-4b59-bebe-b99b19dd1b5f" (UID: "37939e63-b34f-4b59-bebe-b99b19dd1b5f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.212222 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv" (OuterVolumeSpecName: "kube-api-access-npvpv") pod "37939e63-b34f-4b59-bebe-b99b19dd1b5f" (UID: "37939e63-b34f-4b59-bebe-b99b19dd1b5f"). InnerVolumeSpecName "kube-api-access-npvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.212272 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w" (OuterVolumeSpecName: "kube-api-access-rj94w") pod "7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" (UID: "7b900cbd-8e74-4b76-8bb6-99c355ac6ebd"). InnerVolumeSpecName "kube-api-access-rj94w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.212321 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq" (OuterVolumeSpecName: "kube-api-access-jmzzq") pod "ec68387f-04fc-4330-9b84-233dc14a5835" (UID: "ec68387f-04fc-4330-9b84-233dc14a5835"). InnerVolumeSpecName "kube-api-access-jmzzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.308240 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq5fr\" (UniqueName: \"kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr\") pod \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.308537 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts\") pod \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\" (UID: \"aa13677f-87b8-46f1-85fa-07b79dfd6ac8\") " Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309046 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npvpv\" (UniqueName: \"kubernetes.io/projected/37939e63-b34f-4b59-bebe-b99b19dd1b5f-kube-api-access-npvpv\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309076 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmzzq\" (UniqueName: \"kubernetes.io/projected/ec68387f-04fc-4330-9b84-233dc14a5835-kube-api-access-jmzzq\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309074 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aa13677f-87b8-46f1-85fa-07b79dfd6ac8" (UID: "aa13677f-87b8-46f1-85fa-07b79dfd6ac8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309092 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec68387f-04fc-4330-9b84-233dc14a5835-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309148 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309161 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37939e63-b34f-4b59-bebe-b99b19dd1b5f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.309172 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj94w\" (UniqueName: \"kubernetes.io/projected/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd-kube-api-access-rj94w\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.312448 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr" (OuterVolumeSpecName: "kube-api-access-vq5fr") pod "aa13677f-87b8-46f1-85fa-07b79dfd6ac8" (UID: "aa13677f-87b8-46f1-85fa-07b79dfd6ac8"). InnerVolumeSpecName "kube-api-access-vq5fr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.410808 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq5fr\" (UniqueName: \"kubernetes.io/projected/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-kube-api-access-vq5fr\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.410846 5032 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa13677f-87b8-46f1-85fa-07b79dfd6ac8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.546831 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vpdhx" event={"ID":"37939e63-b34f-4b59-bebe-b99b19dd1b5f","Type":"ContainerDied","Data":"aeff63d2e8cd089643bb727e2211a10587fc98be55b9c826d3913c3852b65d88"} Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.546882 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aeff63d2e8cd089643bb727e2211a10587fc98be55b9c826d3913c3852b65d88" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.546931 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vpdhx" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.549643 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3697-account-create-update-mdn8x" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.549674 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3697-account-create-update-mdn8x" event={"ID":"7b900cbd-8e74-4b76-8bb6-99c355ac6ebd","Type":"ContainerDied","Data":"2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071"} Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.549750 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2db5df6f261bf6303179719c6a7c57186c8ad06af2ed60515e315f48917f2071" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.551477 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" event={"ID":"aa13677f-87b8-46f1-85fa-07b79dfd6ac8","Type":"ContainerDied","Data":"95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4"} Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.551501 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95057471c3061341254a6629febd26518a5cdec9ac39790e9996bac7c49ca9e4" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.551520 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ad31-account-create-update-lgnzz" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.553546 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" event={"ID":"ec68387f-04fc-4330-9b84-233dc14a5835","Type":"ContainerDied","Data":"59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8"} Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.553570 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59b18a07a9821c187931fbeb2ca0ff419ebb5a0c5eb03465dbc1a8b41c9046c8" Jan 22 16:25:20 crc kubenswrapper[5032]: I0122 16:25:20.553579 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a042-account-create-update-zrqtx" Jan 22 16:25:21 crc kubenswrapper[5032]: I0122 16:25:21.579817 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerStarted","Data":"bfad2b86781384e7713b8d24c1fb8787ec379485e325ccdaed2b0bc17ab2ffab"} Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.408051 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.592593 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerStarted","Data":"79bc601e172a20eae31efb59fd30dd1328366a2ea2e463ee9bc9a93f8dc808ad"} Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.624683 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.921142 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-92g57"] Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.921976 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37939e63-b34f-4b59-bebe-b99b19dd1b5f" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922010 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="37939e63-b34f-4b59-bebe-b99b19dd1b5f" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.922033 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa13677f-87b8-46f1-85fa-07b79dfd6ac8" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922043 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa13677f-87b8-46f1-85fa-07b79dfd6ac8" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.922081 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922090 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.922105 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8655adea-0114-43b1-a101-47535eda5bc5" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922114 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8655adea-0114-43b1-a101-47535eda5bc5" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.922145 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec68387f-04fc-4330-9b84-233dc14a5835" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922154 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec68387f-04fc-4330-9b84-233dc14a5835" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: E0122 16:25:22.922168 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 
16:25:22.922176 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922405 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa13677f-87b8-46f1-85fa-07b79dfd6ac8" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922424 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="37939e63-b34f-4b59-bebe-b99b19dd1b5f" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922436 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922456 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922468 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec68387f-04fc-4330-9b84-233dc14a5835" containerName="mariadb-account-create-update" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.922478 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8655adea-0114-43b1-a101-47535eda5bc5" containerName="mariadb-database-create" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.923538 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.930226 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mv6pk" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.930226 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.930952 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 16:25:22 crc kubenswrapper[5032]: I0122 16:25:22.933009 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-92g57"] Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.060850 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jfhf\" (UniqueName: \"kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.060923 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.060981 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " 
pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.061025 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.162430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.162629 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jfhf\" (UniqueName: \"kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.162671 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.162784 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.168117 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.168682 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.176736 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.184610 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jfhf\" (UniqueName: \"kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf\") pod \"nova-cell0-conductor-db-sync-92g57\" (UID: 
\"6c83b824-d7c9-413d-abdf-353330608097\") " pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.253016 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.608223 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerStarted","Data":"2897fb9ef8d10f0636802991cfb2741bb92a15f1387c1c6f15df0d7aea55d802"} Jan 22 16:25:23 crc kubenswrapper[5032]: I0122 16:25:23.858394 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-92g57"] Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.622058 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-92g57" event={"ID":"6c83b824-d7c9-413d-abdf-353330608097","Type":"ContainerStarted","Data":"a1aa721de96b8bc51e5fd9157b476593c0fa75dc454a22c89e1f4aa7e2b02a98"} Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.628350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerStarted","Data":"c29010e63f41863822ccedd7b38a3cfc02fc9f33b92c997775d1f578c0424add"} Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.628523 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-central-agent" containerID="cri-o://bfad2b86781384e7713b8d24c1fb8787ec379485e325ccdaed2b0bc17ab2ffab" gracePeriod=30 Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.628693 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.628910 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="proxy-httpd" containerID="cri-o://c29010e63f41863822ccedd7b38a3cfc02fc9f33b92c997775d1f578c0424add" gracePeriod=30 Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.628965 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-notification-agent" containerID="cri-o://79bc601e172a20eae31efb59fd30dd1328366a2ea2e463ee9bc9a93f8dc808ad" gracePeriod=30 Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.629085 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="sg-core" containerID="cri-o://2897fb9ef8d10f0636802991cfb2741bb92a15f1387c1c6f15df0d7aea55d802" gracePeriod=30 Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.669990 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.266145366 podStartE2EDuration="8.66996566s" podCreationTimestamp="2026-01-22 16:25:16 +0000 UTC" firstStartedPulling="2026-01-22 16:25:17.564594807 +0000 UTC m=+1221.376763848" lastFinishedPulling="2026-01-22 16:25:23.968415111 +0000 UTC m=+1227.780584142" observedRunningTime="2026-01-22 16:25:24.658169382 +0000 UTC m=+1228.470338413" watchObservedRunningTime="2026-01-22 16:25:24.66996566 +0000 UTC m=+1228.482134701" Jan 22 
16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.688247 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:24 crc kubenswrapper[5032]: I0122 16:25:24.688342 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-659fb8687c-zp7xh" Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.651521 5032 generic.go:334] "Generic (PLEG): container finished" podID="ceac2bce-8113-4730-a763-80e0e0395b75" containerID="c29010e63f41863822ccedd7b38a3cfc02fc9f33b92c997775d1f578c0424add" exitCode=0 Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.651935 5032 generic.go:334] "Generic (PLEG): container finished" podID="ceac2bce-8113-4730-a763-80e0e0395b75" containerID="2897fb9ef8d10f0636802991cfb2741bb92a15f1387c1c6f15df0d7aea55d802" exitCode=2 Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.651950 5032 generic.go:334] "Generic (PLEG): container finished" podID="ceac2bce-8113-4730-a763-80e0e0395b75" containerID="79bc601e172a20eae31efb59fd30dd1328366a2ea2e463ee9bc9a93f8dc808ad" exitCode=0 Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.651750 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerDied","Data":"c29010e63f41863822ccedd7b38a3cfc02fc9f33b92c997775d1f578c0424add"} Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.652266 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerDied","Data":"2897fb9ef8d10f0636802991cfb2741bb92a15f1387c1c6f15df0d7aea55d802"} Jan 22 16:25:25 crc kubenswrapper[5032]: I0122 16:25:25.652279 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerDied","Data":"79bc601e172a20eae31efb59fd30dd1328366a2ea2e463ee9bc9a93f8dc808ad"} Jan 22 16:25:29 crc kubenswrapper[5032]: I0122 16:25:29.710951 5032 generic.go:334] "Generic (PLEG): container finished" podID="ceac2bce-8113-4730-a763-80e0e0395b75" containerID="bfad2b86781384e7713b8d24c1fb8787ec379485e325ccdaed2b0bc17ab2ffab" exitCode=0 Jan 22 16:25:29 crc kubenswrapper[5032]: I0122 16:25:29.711099 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerDied","Data":"bfad2b86781384e7713b8d24c1fb8787ec379485e325ccdaed2b0bc17ab2ffab"} Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.250989 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.399694 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.400121 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.400347 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.400491 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.400690 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.400716 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.401182 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pscwk\" (UniqueName: \"kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.401380 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd\") pod \"ceac2bce-8113-4730-a763-80e0e0395b75\" (UID: \"ceac2bce-8113-4730-a763-80e0e0395b75\") " Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.401750 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.402278 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.402402 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceac2bce-8113-4730-a763-80e0e0395b75-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.404282 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts" (OuterVolumeSpecName: "scripts") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.404745 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk" (OuterVolumeSpecName: "kube-api-access-pscwk") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "kube-api-access-pscwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.448830 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.474810 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.504204 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pscwk\" (UniqueName: \"kubernetes.io/projected/ceac2bce-8113-4730-a763-80e0e0395b75-kube-api-access-pscwk\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.504236 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.504246 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.504256 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.507842 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data" (OuterVolumeSpecName: "config-data") pod "ceac2bce-8113-4730-a763-80e0e0395b75" (UID: "ceac2bce-8113-4730-a763-80e0e0395b75"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.605739 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceac2bce-8113-4730-a763-80e0e0395b75-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.772177 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-92g57" event={"ID":"6c83b824-d7c9-413d-abdf-353330608097","Type":"ContainerStarted","Data":"093f290055f12fb95d7d1ebcfe1e49a76f1e349fddd5d0b5272b0c6c1089ec55"} Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.775684 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceac2bce-8113-4730-a763-80e0e0395b75","Type":"ContainerDied","Data":"a59bd620116e7e5f9faefd1fcf1b811c1ab7b01e5fddcfab933dc77e0eee531e"} Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.775751 5032 scope.go:117] "RemoveContainer" containerID="c29010e63f41863822ccedd7b38a3cfc02fc9f33b92c997775d1f578c0424add" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.775990 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.798800 5032 scope.go:117] "RemoveContainer" containerID="2897fb9ef8d10f0636802991cfb2741bb92a15f1387c1c6f15df0d7aea55d802" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.800572 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-92g57" podStartSLOduration=2.747718226 podStartE2EDuration="12.800544906s" podCreationTimestamp="2026-01-22 16:25:22 +0000 UTC" firstStartedPulling="2026-01-22 16:25:23.862830623 +0000 UTC m=+1227.674999654" lastFinishedPulling="2026-01-22 16:25:33.915657303 +0000 UTC m=+1237.727826334" observedRunningTime="2026-01-22 16:25:34.791389379 +0000 UTC m=+1238.603558440" watchObservedRunningTime="2026-01-22 16:25:34.800544906 +0000 UTC m=+1238.612713957" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.834358 5032 scope.go:117] "RemoveContainer" containerID="79bc601e172a20eae31efb59fd30dd1328366a2ea2e463ee9bc9a93f8dc808ad" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.836002 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.862909 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.873507 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:34 crc kubenswrapper[5032]: E0122 16:25:34.874073 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="proxy-httpd" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.874090 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="proxy-httpd" Jan 22 16:25:34 crc kubenswrapper[5032]: E0122 16:25:34.874114 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-notification-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.874123 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-notification-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: E0122 16:25:34.874138 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-central-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.874146 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-central-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: E0122 16:25:34.874158 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="sg-core" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.874164 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="sg-core" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.875114 5032 scope.go:117] "RemoveContainer" containerID="bfad2b86781384e7713b8d24c1fb8787ec379485e325ccdaed2b0bc17ab2ffab" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.887191 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-central-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.887238 5032 
memory_manager.go:354] "RemoveStaleState removing state" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="sg-core" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.887250 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="proxy-httpd" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.887276 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" containerName="ceilometer-notification-agent" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.893673 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.894199 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.897157 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.897457 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.919562 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.919837 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-log" containerID="cri-o://732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb" gracePeriod=30 Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.920053 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-httpd" containerID="cri-o://82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8" gracePeriod=30 Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932228 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhzdp\" (UniqueName: \"kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932302 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932332 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932358 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " 
pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932395 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932497 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:34 crc kubenswrapper[5032]: I0122 16:25:34.932532 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034273 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhzdp\" (UniqueName: \"kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034340 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034363 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034382 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034409 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034483 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.034508 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts\") pod \"ceilometer-0\" (UID: 
\"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.035415 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.035530 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.041380 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.042430 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.043295 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.055415 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhzdp\" (UniqueName: \"kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.060690 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.246020 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.576687 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:35 crc kubenswrapper[5032]: W0122 16:25:35.578471 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod370b1d70_1350_4ed8_84c0_cd9d090a5d98.slice/crio-0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a WatchSource:0}: Error finding container 0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a: Status 404 returned error can't find the container with id 0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.769723 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.770431 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-log" containerID="cri-o://9b3a151aa981201420cd9a43bd8ee927ffe100bfaf449bb58f6c6d31dcfe5d1e" gracePeriod=30 Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.770480 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-httpd" containerID="cri-o://b6c7424f47d322a90c1d471d03c82a562892675461e2a2cf65891b84df5835d6" gracePeriod=30 Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.784850 5032 generic.go:334] "Generic (PLEG): container finished" podID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerID="732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb" exitCode=143 Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.784919 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerDied","Data":"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb"} Jan 22 16:25:35 crc kubenswrapper[5032]: I0122 16:25:35.787996 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerStarted","Data":"0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a"} Jan 22 16:25:36 crc kubenswrapper[5032]: I0122 16:25:36.469583 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceac2bce-8113-4730-a763-80e0e0395b75" path="/var/lib/kubelet/pods/ceac2bce-8113-4730-a763-80e0e0395b75/volumes" Jan 22 16:25:36 crc kubenswrapper[5032]: I0122 16:25:36.779096 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:36 crc kubenswrapper[5032]: I0122 16:25:36.810181 5032 generic.go:334] "Generic (PLEG): container finished" podID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerID="9b3a151aa981201420cd9a43bd8ee927ffe100bfaf449bb58f6c6d31dcfe5d1e" exitCode=143 Jan 22 16:25:36 crc kubenswrapper[5032]: I0122 16:25:36.810248 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerDied","Data":"9b3a151aa981201420cd9a43bd8ee927ffe100bfaf449bb58f6c6d31dcfe5d1e"} Jan 22 16:25:36 crc kubenswrapper[5032]: I0122 16:25:36.813944 5032 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerStarted","Data":"268d93dfa7c89d797585e318654b97b45dfa70c15ae4846046696fd59dbeb88b"} Jan 22 16:25:37 crc kubenswrapper[5032]: I0122 16:25:37.829125 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerStarted","Data":"bdbda56f7f042772642f178e95bcd257a3772a4e7e3d2491046ad6124022a599"} Jan 22 16:25:37 crc kubenswrapper[5032]: I0122 16:25:37.830142 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerStarted","Data":"e9c5547d277ef69a8e36044d8a21d259c0c38a16407229fd83db8a518eef339b"} Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.702165 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815332 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815474 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815545 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815587 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815623 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815706 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzxwr\" (UniqueName: \"kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815778 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815830 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts\") pod \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\" (UID: \"f750cd9d-1983-4ff8-a510-fc773b3fcfcf\") " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.815996 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.816079 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs" (OuterVolumeSpecName: "logs") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.816390 5032 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.816410 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.841536 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts" (OuterVolumeSpecName: "scripts") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.841561 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.842358 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr" (OuterVolumeSpecName: "kube-api-access-vzxwr") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "kube-api-access-vzxwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.852734 5032 generic.go:334] "Generic (PLEG): container finished" podID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerID="82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8" exitCode=0 Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.853524 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.853012 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerDied","Data":"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8"} Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.853696 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f750cd9d-1983-4ff8-a510-fc773b3fcfcf","Type":"ContainerDied","Data":"0401bf6ac9ba45d5923a2cca342c7ed3c14b9913d9aee1c8ba142f8d43ab8915"} Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.853725 5032 scope.go:117] "RemoveContainer" containerID="82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.854714 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.890874 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.915986 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data" (OuterVolumeSpecName: "config-data") pod "f750cd9d-1983-4ff8-a510-fc773b3fcfcf" (UID: "f750cd9d-1983-4ff8-a510-fc773b3fcfcf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920579 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920623 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzxwr\" (UniqueName: \"kubernetes.io/projected/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-kube-api-access-vzxwr\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920677 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920688 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920696 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.920709 5032 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f750cd9d-1983-4ff8-a510-fc773b3fcfcf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.952258 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 16:25:38 crc kubenswrapper[5032]: I0122 16:25:38.975637 5032 scope.go:117] "RemoveContainer" containerID="732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.019665 5032 scope.go:117] "RemoveContainer" containerID="82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8" Jan 22 16:25:39 crc kubenswrapper[5032]: E0122 16:25:39.020074 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8\": container with ID starting with 82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8 not found: ID does not exist" containerID="82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.020105 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8"} err="failed to get container status \"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8\": rpc error: code = NotFound desc = could not find container \"82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8\": container with ID starting with 82a58251871e106207b56ee22be75cda8a9d67acacc5fff9f84b5edd045423f8 not found: ID does not exist" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.020131 5032 scope.go:117] "RemoveContainer" containerID="732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb" Jan 22 16:25:39 crc kubenswrapper[5032]: E0122 16:25:39.020419 5032 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb\": container with ID starting with 732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb not found: ID does not exist" containerID="732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.020450 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb"} err="failed to get container status \"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb\": rpc error: code = NotFound desc = could not find container \"732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb\": container with ID starting with 732411c507921d85ad75ec3795fbd6226f1e4fb936f466632c26c0240afa98bb not found: ID does not exist" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.023699 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.322228 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.330668 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.352499 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:39 crc kubenswrapper[5032]: E0122 16:25:39.352927 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-httpd" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.352948 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-httpd" Jan 22 16:25:39 crc kubenswrapper[5032]: E0122 16:25:39.352964 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-log" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.352972 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-log" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.353278 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-log" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.353303 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" containerName="glance-httpd" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.354307 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.356400 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.356674 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.373550 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431068 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-config-data\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431140 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-scripts\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431183 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-logs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431233 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431261 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431286 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn2q8\" (UniqueName: \"kubernetes.io/projected/f0f74898-1fad-4f88-896b-9babae7beba1-kube-api-access-mn2q8\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431327 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.431348 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.532818 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.532880 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.532922 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn2q8\" (UniqueName: \"kubernetes.io/projected/f0f74898-1fad-4f88-896b-9babae7beba1-kube-api-access-mn2q8\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.532962 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.532989 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.533103 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-config-data\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.533141 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-scripts\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.533177 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-logs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.533283 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.534444 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.535200 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0f74898-1fad-4f88-896b-9babae7beba1-logs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.538468 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.538923 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-config-data\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.539657 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.540290 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0f74898-1fad-4f88-896b-9babae7beba1-scripts\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.558624 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn2q8\" (UniqueName: \"kubernetes.io/projected/f0f74898-1fad-4f88-896b-9babae7beba1-kube-api-access-mn2q8\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.589747 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f0f74898-1fad-4f88-896b-9babae7beba1\") " pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.675585 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.880646 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerStarted","Data":"773d372463d1716aae14a710bae07c44cf2d257ce961dd6d66e3c5e16b866d31"} Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.880934 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-central-agent" containerID="cri-o://268d93dfa7c89d797585e318654b97b45dfa70c15ae4846046696fd59dbeb88b" gracePeriod=30 Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.881336 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.881470 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="proxy-httpd" containerID="cri-o://773d372463d1716aae14a710bae07c44cf2d257ce961dd6d66e3c5e16b866d31" gracePeriod=30 Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.881670 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-notification-agent" containerID="cri-o://e9c5547d277ef69a8e36044d8a21d259c0c38a16407229fd83db8a518eef339b" gracePeriod=30 Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.881729 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="sg-core" containerID="cri-o://bdbda56f7f042772642f178e95bcd257a3772a4e7e3d2491046ad6124022a599" gracePeriod=30 Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.894490 5032 generic.go:334] "Generic (PLEG): container finished" podID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerID="b6c7424f47d322a90c1d471d03c82a562892675461e2a2cf65891b84df5835d6" exitCode=0 Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.894582 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerDied","Data":"b6c7424f47d322a90c1d471d03c82a562892675461e2a2cf65891b84df5835d6"} Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.927521 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.37218758 podStartE2EDuration="5.927496037s" podCreationTimestamp="2026-01-22 16:25:34 +0000 UTC" firstStartedPulling="2026-01-22 16:25:35.580234052 +0000 UTC m=+1239.392403083" lastFinishedPulling="2026-01-22 16:25:39.135542509 +0000 UTC m=+1242.947711540" observedRunningTime="2026-01-22 16:25:39.908177906 +0000 UTC m=+1243.720346937" watchObservedRunningTime="2026-01-22 16:25:39.927496037 +0000 UTC m=+1243.739665068" Jan 22 16:25:39 crc kubenswrapper[5032]: I0122 16:25:39.934439 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051471 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051530 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtmfb\" (UniqueName: \"kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051565 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051595 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051648 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051721 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051773 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.051843 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts\") pod \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\" (UID: \"0116e396-0fd1-4a28-bcc7-d2712c0cfa93\") " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.053223 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.054140 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs" (OuterVolumeSpecName: "logs") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.056991 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts" (OuterVolumeSpecName: "scripts") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.057373 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.057511 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb" (OuterVolumeSpecName: "kube-api-access-xtmfb") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "kube-api-access-xtmfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.103221 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.104336 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.111610 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data" (OuterVolumeSpecName: "config-data") pod "0116e396-0fd1-4a28-bcc7-d2712c0cfa93" (UID: "0116e396-0fd1-4a28-bcc7-d2712c0cfa93"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159645 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159707 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159723 5032 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159735 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159746 5032 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159784 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtmfb\" (UniqueName: \"kubernetes.io/projected/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-kube-api-access-xtmfb\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159798 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.159809 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0116e396-0fd1-4a28-bcc7-d2712c0cfa93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.189370 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.262382 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.305510 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.467516 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f750cd9d-1983-4ff8-a510-fc773b3fcfcf" path="/var/lib/kubelet/pods/f750cd9d-1983-4ff8-a510-fc773b3fcfcf/volumes" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.906279 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0116e396-0fd1-4a28-bcc7-d2712c0cfa93","Type":"ContainerDied","Data":"08835a8819cdd5bd9e77b70a2c90745b94e2c11a682b53f04742c7e61e48e584"} Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.906336 5032 scope.go:117] "RemoveContainer" containerID="b6c7424f47d322a90c1d471d03c82a562892675461e2a2cf65891b84df5835d6" Jan 22 16:25:40 crc 
kubenswrapper[5032]: I0122 16:25:40.906479 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910761 5032 generic.go:334] "Generic (PLEG): container finished" podID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerID="773d372463d1716aae14a710bae07c44cf2d257ce961dd6d66e3c5e16b866d31" exitCode=0 Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910791 5032 generic.go:334] "Generic (PLEG): container finished" podID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerID="bdbda56f7f042772642f178e95bcd257a3772a4e7e3d2491046ad6124022a599" exitCode=2 Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910800 5032 generic.go:334] "Generic (PLEG): container finished" podID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerID="e9c5547d277ef69a8e36044d8a21d259c0c38a16407229fd83db8a518eef339b" exitCode=0 Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910834 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerDied","Data":"773d372463d1716aae14a710bae07c44cf2d257ce961dd6d66e3c5e16b866d31"} Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910902 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerDied","Data":"bdbda56f7f042772642f178e95bcd257a3772a4e7e3d2491046ad6124022a599"} Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.910915 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerDied","Data":"e9c5547d277ef69a8e36044d8a21d259c0c38a16407229fd83db8a518eef339b"} Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.912891 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f0f74898-1fad-4f88-896b-9babae7beba1","Type":"ContainerStarted","Data":"17ebdcef5ab6ee7e7710e36d22936f220286f6ee9385e709655b45712e71bc87"} Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.929708 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.930100 5032 scope.go:117] "RemoveContainer" containerID="9b3a151aa981201420cd9a43bd8ee927ffe100bfaf449bb58f6c6d31dcfe5d1e" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.939962 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.957847 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:40 crc kubenswrapper[5032]: E0122 16:25:40.958232 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-httpd" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.958249 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-httpd" Jan 22 16:25:40 crc kubenswrapper[5032]: E0122 16:25:40.958272 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-log" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.958278 5032 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-log" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.958463 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-httpd" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.958483 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" containerName="glance-log" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.959378 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.962077 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.962275 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973211 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973261 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973287 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973303 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973321 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-logs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973408 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973417 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " 
pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973524 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:40 crc kubenswrapper[5032]: I0122 16:25:40.973547 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vps7p\" (UniqueName: \"kubernetes.io/projected/28e61081-98f8-4f20-9086-5ff5d5bff90c-kube-api-access-vps7p\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.074792 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.074865 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.074886 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.074903 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.074919 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-logs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.075015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.075044 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc 
kubenswrapper[5032]: I0122 16:25:41.075061 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vps7p\" (UniqueName: \"kubernetes.io/projected/28e61081-98f8-4f20-9086-5ff5d5bff90c-kube-api-access-vps7p\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.078004 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.080180 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.080342 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e61081-98f8-4f20-9086-5ff5d5bff90c-logs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.080960 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.081923 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.082144 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.087717 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e61081-98f8-4f20-9086-5ff5d5bff90c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.094309 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vps7p\" (UniqueName: \"kubernetes.io/projected/28e61081-98f8-4f20-9086-5ff5d5bff90c-kube-api-access-vps7p\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.107446 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"28e61081-98f8-4f20-9086-5ff5d5bff90c\") " pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.274384 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.811174 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 16:25:41 crc kubenswrapper[5032]: W0122 16:25:41.819072 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28e61081_98f8_4f20_9086_5ff5d5bff90c.slice/crio-84b26e09141643899ee330f6d7b5b845866dcaacff06cf571409474dfccd6708 WatchSource:0}: Error finding container 84b26e09141643899ee330f6d7b5b845866dcaacff06cf571409474dfccd6708: Status 404 returned error can't find the container with id 84b26e09141643899ee330f6d7b5b845866dcaacff06cf571409474dfccd6708 Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.923222 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"28e61081-98f8-4f20-9086-5ff5d5bff90c","Type":"ContainerStarted","Data":"84b26e09141643899ee330f6d7b5b845866dcaacff06cf571409474dfccd6708"} Jan 22 16:25:41 crc kubenswrapper[5032]: I0122 16:25:41.927200 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f0f74898-1fad-4f88-896b-9babae7beba1","Type":"ContainerStarted","Data":"577577eaf0207ed06af661b7ae49c15ff7f9245ce08c1ba0db21232fb587bcfa"} Jan 22 16:25:42 crc kubenswrapper[5032]: I0122 16:25:42.468831 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0116e396-0fd1-4a28-bcc7-d2712c0cfa93" path="/var/lib/kubelet/pods/0116e396-0fd1-4a28-bcc7-d2712c0cfa93/volumes" Jan 22 16:25:42 crc kubenswrapper[5032]: I0122 16:25:42.938780 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f0f74898-1fad-4f88-896b-9babae7beba1","Type":"ContainerStarted","Data":"b42c590e6c4ff5b4aa63f8446f627ccf6cf06aede29b9121ec05336be60cdbcc"} Jan 22 16:25:42 crc kubenswrapper[5032]: I0122 16:25:42.943983 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"28e61081-98f8-4f20-9086-5ff5d5bff90c","Type":"ContainerStarted","Data":"06830d12ae3560f34482ad018cde329971052a283fee893b7cd1a7c23f53a846"} Jan 22 16:25:42 crc kubenswrapper[5032]: I0122 16:25:42.944045 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"28e61081-98f8-4f20-9086-5ff5d5bff90c","Type":"ContainerStarted","Data":"85ed4b42dcc3ee306c661f0bfe7124b5eea307965981b92a5e02463fecfb610e"} Jan 22 16:25:42 crc kubenswrapper[5032]: I0122 16:25:42.968279 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.9682571380000002 podStartE2EDuration="3.968257138s" podCreationTimestamp="2026-01-22 16:25:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:42.965715089 +0000 UTC m=+1246.777884130" watchObservedRunningTime="2026-01-22 16:25:42.968257138 +0000 UTC m=+1246.780426199" Jan 22 
16:25:43 crc kubenswrapper[5032]: I0122 16:25:43.013001 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.012967884 podStartE2EDuration="3.012967884s" podCreationTimestamp="2026-01-22 16:25:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:42.992729898 +0000 UTC m=+1246.804898959" watchObservedRunningTime="2026-01-22 16:25:43.012967884 +0000 UTC m=+1246.825136915" Jan 22 16:25:45 crc kubenswrapper[5032]: I0122 16:25:45.977989 5032 generic.go:334] "Generic (PLEG): container finished" podID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerID="268d93dfa7c89d797585e318654b97b45dfa70c15ae4846046696fd59dbeb88b" exitCode=0 Jan 22 16:25:45 crc kubenswrapper[5032]: I0122 16:25:45.978086 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerDied","Data":"268d93dfa7c89d797585e318654b97b45dfa70c15ae4846046696fd59dbeb88b"} Jan 22 16:25:45 crc kubenswrapper[5032]: I0122 16:25:45.978628 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b1d70-1350-4ed8-84c0-cd9d090a5d98","Type":"ContainerDied","Data":"0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a"} Jan 22 16:25:45 crc kubenswrapper[5032]: I0122 16:25:45.978645 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fd6fa6a5117293156d27e996c646406fa950c689e240fc8a8462d8165f9ad4a" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.006280 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.079763 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.079847 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.079947 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.079982 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhzdp\" (UniqueName: \"kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.080032 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " 
Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.080054 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.080102 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd\") pod \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\" (UID: \"370b1d70-1350-4ed8-84c0-cd9d090a5d98\") " Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.080572 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.081051 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.086185 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts" (OuterVolumeSpecName: "scripts") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.086637 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp" (OuterVolumeSpecName: "kube-api-access-rhzdp") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "kube-api-access-rhzdp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.135424 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.175197 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182415 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182445 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182458 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b1d70-1350-4ed8-84c0-cd9d090a5d98-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182467 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182476 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.182484 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhzdp\" (UniqueName: \"kubernetes.io/projected/370b1d70-1350-4ed8-84c0-cd9d090a5d98-kube-api-access-rhzdp\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.189907 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data" (OuterVolumeSpecName: "config-data") pod "370b1d70-1350-4ed8-84c0-cd9d090a5d98" (UID: "370b1d70-1350-4ed8-84c0-cd9d090a5d98"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.284943 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b1d70-1350-4ed8-84c0-cd9d090a5d98-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:46 crc kubenswrapper[5032]: I0122 16:25:46.990787 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.018617 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.028159 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.049043 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:47 crc kubenswrapper[5032]: E0122 16:25:47.050133 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-notification-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050155 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-notification-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: E0122 16:25:47.050182 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="proxy-httpd" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050189 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="proxy-httpd" Jan 22 16:25:47 crc kubenswrapper[5032]: E0122 16:25:47.050201 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-central-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050207 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-central-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: E0122 16:25:47.050227 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="sg-core" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050232 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="sg-core" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050393 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-central-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050407 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="sg-core" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050414 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="ceilometer-notification-agent" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.050427 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" containerName="proxy-httpd" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.056463 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.058908 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.063236 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.067317 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203102 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203153 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203217 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203371 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203566 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjcsg\" (UniqueName: \"kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203702 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.203881 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305068 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjcsg\" (UniqueName: \"kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: 
I0122 16:25:47.305126 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305185 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305230 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305253 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305280 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305302 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305847 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.305944 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.311960 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.312151 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.313376 5032 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.326698 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjcsg\" (UniqueName: \"kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.327056 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data\") pod \"ceilometer-0\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.394352 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:47 crc kubenswrapper[5032]: I0122 16:25:47.734188 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:47 crc kubenswrapper[5032]: W0122 16:25:47.738934 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd727e693_4533_48ed_aaa6_0997e93bec5a.slice/crio-f9412d86f12142d3c8dfdb5e350e01a0201ac63a1651459f69694baffc9f9637 WatchSource:0}: Error finding container f9412d86f12142d3c8dfdb5e350e01a0201ac63a1651459f69694baffc9f9637: Status 404 returned error can't find the container with id f9412d86f12142d3c8dfdb5e350e01a0201ac63a1651459f69694baffc9f9637 Jan 22 16:25:48 crc kubenswrapper[5032]: I0122 16:25:48.006739 5032 generic.go:334] "Generic (PLEG): container finished" podID="6c83b824-d7c9-413d-abdf-353330608097" containerID="093f290055f12fb95d7d1ebcfe1e49a76f1e349fddd5d0b5272b0c6c1089ec55" exitCode=0 Jan 22 16:25:48 crc kubenswrapper[5032]: I0122 16:25:48.006807 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-92g57" event={"ID":"6c83b824-d7c9-413d-abdf-353330608097","Type":"ContainerDied","Data":"093f290055f12fb95d7d1ebcfe1e49a76f1e349fddd5d0b5272b0c6c1089ec55"} Jan 22 16:25:48 crc kubenswrapper[5032]: I0122 16:25:48.008425 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerStarted","Data":"f9412d86f12142d3c8dfdb5e350e01a0201ac63a1651459f69694baffc9f9637"} Jan 22 16:25:48 crc kubenswrapper[5032]: I0122 16:25:48.475016 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="370b1d70-1350-4ed8-84c0-cd9d090a5d98" path="/var/lib/kubelet/pods/370b1d70-1350-4ed8-84c0-cd9d090a5d98/volumes" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.021543 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerStarted","Data":"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1"} Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.442084 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.555338 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts\") pod \"6c83b824-d7c9-413d-abdf-353330608097\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.555388 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jfhf\" (UniqueName: \"kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf\") pod \"6c83b824-d7c9-413d-abdf-353330608097\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.555436 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle\") pod \"6c83b824-d7c9-413d-abdf-353330608097\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.555506 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data\") pod \"6c83b824-d7c9-413d-abdf-353330608097\" (UID: \"6c83b824-d7c9-413d-abdf-353330608097\") " Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.571097 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts" (OuterVolumeSpecName: "scripts") pod "6c83b824-d7c9-413d-abdf-353330608097" (UID: "6c83b824-d7c9-413d-abdf-353330608097"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.571115 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf" (OuterVolumeSpecName: "kube-api-access-5jfhf") pod "6c83b824-d7c9-413d-abdf-353330608097" (UID: "6c83b824-d7c9-413d-abdf-353330608097"). InnerVolumeSpecName "kube-api-access-5jfhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.583807 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data" (OuterVolumeSpecName: "config-data") pod "6c83b824-d7c9-413d-abdf-353330608097" (UID: "6c83b824-d7c9-413d-abdf-353330608097"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.588336 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c83b824-d7c9-413d-abdf-353330608097" (UID: "6c83b824-d7c9-413d-abdf-353330608097"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.658207 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.658251 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jfhf\" (UniqueName: \"kubernetes.io/projected/6c83b824-d7c9-413d-abdf-353330608097-kube-api-access-5jfhf\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.658267 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.658280 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c83b824-d7c9-413d-abdf-353330608097-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.676648 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.677284 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.723805 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 16:25:49 crc kubenswrapper[5032]: I0122 16:25:49.741687 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.033423 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerStarted","Data":"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e"} Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.037259 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-92g57" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.037276 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-92g57" event={"ID":"6c83b824-d7c9-413d-abdf-353330608097","Type":"ContainerDied","Data":"a1aa721de96b8bc51e5fd9157b476593c0fa75dc454a22c89e1f4aa7e2b02a98"} Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.037423 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1aa721de96b8bc51e5fd9157b476593c0fa75dc454a22c89e1f4aa7e2b02a98" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.037438 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.037449 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.186637 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:25:50 crc kubenswrapper[5032]: E0122 16:25:50.187221 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c83b824-d7c9-413d-abdf-353330608097" containerName="nova-cell0-conductor-db-sync" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.187238 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c83b824-d7c9-413d-abdf-353330608097" containerName="nova-cell0-conductor-db-sync" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.187415 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c83b824-d7c9-413d-abdf-353330608097" containerName="nova-cell0-conductor-db-sync" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.187998 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.192992 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mv6pk" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.193175 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.203434 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.271016 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.271058 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqvs7\" (UniqueName: \"kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.271142 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.372246 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.372369 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.372394 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqvs7\" (UniqueName: \"kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.376495 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.384946 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.394022 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqvs7\" (UniqueName: \"kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7\") pod \"nova-cell0-conductor-0\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:50 crc kubenswrapper[5032]: I0122 16:25:50.523569 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.049833 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerStarted","Data":"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c"} Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.061778 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.079373 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.275306 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.275817 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.329817 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:51 crc kubenswrapper[5032]: I0122 16:25:51.364298 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.062464 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerStarted","Data":"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af"} Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.062940 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.064545 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4be2c6b7-f91a-44a6-b698-9019bd5dd86a","Type":"ContainerStarted","Data":"5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e"} Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.064592 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4be2c6b7-f91a-44a6-b698-9019bd5dd86a","Type":"ContainerStarted","Data":"a5311ec68a50429700292f9241dcce9fb8180c7424dee26bb97c91d39b2ca493"} Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.064598 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.064617 5032 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.065101 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 
16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.065135 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.065188 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" containerID="cri-o://5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" gracePeriod=30 Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.072453 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.107334 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.109168 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.048964412 podStartE2EDuration="5.109150005s" podCreationTimestamp="2026-01-22 16:25:47 +0000 UTC" firstStartedPulling="2026-01-22 16:25:47.740695448 +0000 UTC m=+1251.552864489" lastFinishedPulling="2026-01-22 16:25:51.800881051 +0000 UTC m=+1255.613050082" observedRunningTime="2026-01-22 16:25:52.091891319 +0000 UTC m=+1255.904060350" watchObservedRunningTime="2026-01-22 16:25:52.109150005 +0000 UTC m=+1255.921319036" Jan 22 16:25:52 crc kubenswrapper[5032]: I0122 16:25:52.110346 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.110339087 podStartE2EDuration="2.110339087s" podCreationTimestamp="2026-01-22 16:25:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:25:52.108002904 +0000 UTC m=+1255.920171935" watchObservedRunningTime="2026-01-22 16:25:52.110339087 +0000 UTC m=+1255.922508118" Jan 22 16:25:53 crc kubenswrapper[5032]: I0122 16:25:53.003124 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.045583 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.048297 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.080788 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-central-agent" containerID="cri-o://5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1" gracePeriod=30 Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.080988 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="proxy-httpd" containerID="cri-o://c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af" gracePeriod=30 Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.081065 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="sg-core" 
containerID="cri-o://daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c" gracePeriod=30 Jan 22 16:25:54 crc kubenswrapper[5032]: I0122 16:25:54.081130 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-notification-agent" containerID="cri-o://51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e" gracePeriod=30 Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.091750 5032 generic.go:334] "Generic (PLEG): container finished" podID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerID="c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af" exitCode=0 Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.092025 5032 generic.go:334] "Generic (PLEG): container finished" podID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerID="daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c" exitCode=2 Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.092035 5032 generic.go:334] "Generic (PLEG): container finished" podID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerID="51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e" exitCode=0 Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.091825 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerDied","Data":"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af"} Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.092148 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerDied","Data":"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c"} Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.092166 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerDied","Data":"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e"} Jan 22 16:25:55 crc kubenswrapper[5032]: I0122 16:25:55.524593 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 22 16:25:59 crc kubenswrapper[5032]: E0122 16:25:59.572273 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd727e693_4533_48ed_aaa6_0997e93bec5a.slice/crio-conmon-5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.686184 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.863527 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.863693 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.863882 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.863933 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.864008 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.864059 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.864105 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjcsg\" (UniqueName: \"kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg\") pod \"d727e693-4533-48ed-aaa6-0997e93bec5a\" (UID: \"d727e693-4533-48ed-aaa6-0997e93bec5a\") " Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.864386 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.864427 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.865284 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.865321 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d727e693-4533-48ed-aaa6-0997e93bec5a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.870174 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts" (OuterVolumeSpecName: "scripts") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.871475 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg" (OuterVolumeSpecName: "kube-api-access-hjcsg") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "kube-api-access-hjcsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.903305 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.948041 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.981984 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data" (OuterVolumeSpecName: "config-data") pod "d727e693-4533-48ed-aaa6-0997e93bec5a" (UID: "d727e693-4533-48ed-aaa6-0997e93bec5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.983369 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.983428 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.983447 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjcsg\" (UniqueName: \"kubernetes.io/projected/d727e693-4533-48ed-aaa6-0997e93bec5a-kube-api-access-hjcsg\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.983464 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:25:59 crc kubenswrapper[5032]: I0122 16:25:59.983476 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d727e693-4533-48ed-aaa6-0997e93bec5a-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.149208 5032 generic.go:334] "Generic (PLEG): container finished" podID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerID="5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1" exitCode=0 Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.149251 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerDied","Data":"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1"} Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.149297 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d727e693-4533-48ed-aaa6-0997e93bec5a","Type":"ContainerDied","Data":"f9412d86f12142d3c8dfdb5e350e01a0201ac63a1651459f69694baffc9f9637"} Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.149320 5032 scope.go:117] "RemoveContainer" containerID="c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.149334 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.168425 5032 scope.go:117] "RemoveContainer" containerID="daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.202345 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.209824 5032 scope.go:117] "RemoveContainer" containerID="51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.212581 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.239938 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.240486 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="sg-core" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.240509 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="sg-core" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.240533 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-notification-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.240568 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-notification-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.240591 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-central-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.240600 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-central-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.240668 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="proxy-httpd" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.240678 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="proxy-httpd" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.241011 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="proxy-httpd" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.241064 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-notification-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.241083 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="ceilometer-central-agent" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.241098 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" containerName="sg-core" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.242701 5032 scope.go:117] "RemoveContainer" containerID="5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.244737 5032 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.247540 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.247744 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.256171 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.267609 5032 scope.go:117] "RemoveContainer" containerID="c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.268195 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af\": container with ID starting with c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af not found: ID does not exist" containerID="c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.268245 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af"} err="failed to get container status \"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af\": rpc error: code = NotFound desc = could not find container \"c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af\": container with ID starting with c448fe0ff48abfe8e75a4c7cc147df2ec6923e64919f667da5f1d991ab8889af not found: ID does not exist" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.268276 5032 scope.go:117] "RemoveContainer" containerID="daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.268604 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c\": container with ID starting with daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c not found: ID does not exist" containerID="daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.268639 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c"} err="failed to get container status \"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c\": rpc error: code = NotFound desc = could not find container \"daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c\": container with ID starting with daf78801b18912be72abb9c02aff7979a08d9c51b5b6c47eb361e5c6ad15b10c not found: ID does not exist" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.268661 5032 scope.go:117] "RemoveContainer" containerID="51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.268996 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e\": container with ID starting with 
51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e not found: ID does not exist" containerID="51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.269052 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e"} err="failed to get container status \"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e\": rpc error: code = NotFound desc = could not find container \"51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e\": container with ID starting with 51cf2b747c6b55723d62f1b1941555ca1353f112314d15db99db831a2676b71e not found: ID does not exist" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.269089 5032 scope.go:117] "RemoveContainer" containerID="5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.269399 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1\": container with ID starting with 5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1 not found: ID does not exist" containerID="5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.269467 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1"} err="failed to get container status \"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1\": rpc error: code = NotFound desc = could not find container \"5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1\": container with ID starting with 5e3964cb440ce310f78424c059551869132a60c259ae65b1c36b586118cdb5a1 not found: ID does not exist" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390037 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390093 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390151 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390174 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390399 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390516 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kv79\" (UniqueName: \"kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.390623 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.474219 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d727e693-4533-48ed-aaa6-0997e93bec5a" path="/var/lib/kubelet/pods/d727e693-4533-48ed-aaa6-0997e93bec5a/volumes" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.491991 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492077 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kv79\" (UniqueName: \"kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492140 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492249 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492322 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492423 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.492518 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.493199 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.493642 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.498503 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.498769 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.499119 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.504285 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.522644 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kv79\" (UniqueName: \"kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79\") pod \"ceilometer-0\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " pod="openstack/ceilometer-0" Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.528357 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.530702 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.537113 5032 log.go:32] "ExecSync cmd from runtime 
service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:00 crc kubenswrapper[5032]: E0122 16:26:00.537178 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:00 crc kubenswrapper[5032]: I0122 16:26:00.567796 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:01 crc kubenswrapper[5032]: I0122 16:26:01.042334 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:01 crc kubenswrapper[5032]: I0122 16:26:01.158340 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerStarted","Data":"107aecfb85772f17fa65261e5607b7042b52ae81aa3c395ad3b38296a90c6df1"} Jan 22 16:26:02 crc kubenswrapper[5032]: I0122 16:26:02.170707 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerStarted","Data":"2b9e2810c7d9c12f1d964d0fd6db43be9ebb221bb858d3346f1e698625476856"} Jan 22 16:26:03 crc kubenswrapper[5032]: I0122 16:26:03.182157 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerStarted","Data":"e63e5a5000dc288dd6631112a4dd75529a7bac60f75948ccfe92e734ffd3f3dd"} Jan 22 16:26:04 crc kubenswrapper[5032]: I0122 16:26:04.199186 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerStarted","Data":"62df2d7637b61f3dae8efaa95a22955f75b13cb51ccf327f50199a2b1d506966"} Jan 22 16:26:05 crc kubenswrapper[5032]: E0122 16:26:05.527785 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:05 crc kubenswrapper[5032]: E0122 16:26:05.530237 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:05 crc kubenswrapper[5032]: E0122 16:26:05.532273 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:05 crc kubenswrapper[5032]: E0122 16:26:05.532318 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:06 crc kubenswrapper[5032]: I0122 16:26:06.227950 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerStarted","Data":"647f9b692ea4cdb152c150bd0d2a087661a2c16d3a3ce11fd048995a61e660f4"} Jan 22 16:26:06 crc kubenswrapper[5032]: I0122 16:26:06.228412 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:26:06 crc kubenswrapper[5032]: I0122 16:26:06.267392 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.291815855 podStartE2EDuration="6.267376335s" podCreationTimestamp="2026-01-22 16:26:00 +0000 UTC" firstStartedPulling="2026-01-22 16:26:01.049700918 +0000 UTC m=+1264.861869949" lastFinishedPulling="2026-01-22 16:26:05.025261398 +0000 UTC m=+1268.837430429" observedRunningTime="2026-01-22 16:26:06.259665617 +0000 UTC m=+1270.071834678" watchObservedRunningTime="2026-01-22 16:26:06.267376335 +0000 UTC m=+1270.079545366" Jan 22 16:26:10 crc kubenswrapper[5032]: E0122 16:26:10.528262 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:10 crc kubenswrapper[5032]: E0122 16:26:10.531361 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:10 crc kubenswrapper[5032]: E0122 16:26:10.534182 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:10 crc kubenswrapper[5032]: E0122 16:26:10.534271 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:15 crc kubenswrapper[5032]: E0122 16:26:15.527920 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:15 crc kubenswrapper[5032]: E0122 16:26:15.530249 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:15 crc kubenswrapper[5032]: E0122 16:26:15.531591 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:15 crc kubenswrapper[5032]: E0122 16:26:15.531636 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:20 crc kubenswrapper[5032]: E0122 16:26:20.527986 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:20 crc kubenswrapper[5032]: E0122 16:26:20.531399 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:20 crc kubenswrapper[5032]: E0122 16:26:20.533406 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 16:26:20 crc kubenswrapper[5032]: E0122 16:26:20.533446 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:22 crc kubenswrapper[5032]: I0122 16:26:22.425453 5032 generic.go:334] "Generic (PLEG): container finished" podID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" exitCode=137 Jan 22 16:26:22 crc kubenswrapper[5032]: I0122 16:26:22.425553 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4be2c6b7-f91a-44a6-b698-9019bd5dd86a","Type":"ContainerDied","Data":"5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e"} Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.152152 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.194138 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqvs7\" (UniqueName: \"kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7\") pod \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.194254 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle\") pod \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.194306 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data\") pod \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\" (UID: \"4be2c6b7-f91a-44a6-b698-9019bd5dd86a\") " Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.208770 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7" (OuterVolumeSpecName: "kube-api-access-fqvs7") pod "4be2c6b7-f91a-44a6-b698-9019bd5dd86a" (UID: "4be2c6b7-f91a-44a6-b698-9019bd5dd86a"). InnerVolumeSpecName "kube-api-access-fqvs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.230862 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data" (OuterVolumeSpecName: "config-data") pod "4be2c6b7-f91a-44a6-b698-9019bd5dd86a" (UID: "4be2c6b7-f91a-44a6-b698-9019bd5dd86a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.243319 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4be2c6b7-f91a-44a6-b698-9019bd5dd86a" (UID: "4be2c6b7-f91a-44a6-b698-9019bd5dd86a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.296718 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqvs7\" (UniqueName: \"kubernetes.io/projected/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-kube-api-access-fqvs7\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.296759 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.296773 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be2c6b7-f91a-44a6-b698-9019bd5dd86a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.440024 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4be2c6b7-f91a-44a6-b698-9019bd5dd86a","Type":"ContainerDied","Data":"a5311ec68a50429700292f9241dcce9fb8180c7424dee26bb97c91d39b2ca493"} Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.440091 5032 scope.go:117] "RemoveContainer" containerID="5057cf84d8fa9037445dea2c250817fe0dae104291e2392897b66db8ee501a8e" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.440130 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.490118 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.498540 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.515610 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:26:23 crc kubenswrapper[5032]: E0122 16:26:23.516523 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.516677 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.517216 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" containerName="nova-cell0-conductor-conductor" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.518453 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.521561 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.522019 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mv6pk" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.524124 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.602433 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6t52\" (UniqueName: \"kubernetes.io/projected/cc55079d-e847-4e8b-aa43-55391b8d7a8a-kube-api-access-x6t52\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.602963 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.603239 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.705547 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6t52\" (UniqueName: \"kubernetes.io/projected/cc55079d-e847-4e8b-aa43-55391b8d7a8a-kube-api-access-x6t52\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.705677 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.705713 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.712060 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.712264 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc55079d-e847-4e8b-aa43-55391b8d7a8a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.724670 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6t52\" (UniqueName: \"kubernetes.io/projected/cc55079d-e847-4e8b-aa43-55391b8d7a8a-kube-api-access-x6t52\") pod \"nova-cell0-conductor-0\" (UID: \"cc55079d-e847-4e8b-aa43-55391b8d7a8a\") " pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:23 crc kubenswrapper[5032]: I0122 16:26:23.843128 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:24 crc kubenswrapper[5032]: I0122 16:26:24.339980 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 16:26:24 crc kubenswrapper[5032]: W0122 16:26:24.342981 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc55079d_e847_4e8b_aa43_55391b8d7a8a.slice/crio-4b4cd1897b7e99375f5b3effb04880593110fc10554181091314e44b431efd10 WatchSource:0}: Error finding container 4b4cd1897b7e99375f5b3effb04880593110fc10554181091314e44b431efd10: Status 404 returned error can't find the container with id 4b4cd1897b7e99375f5b3effb04880593110fc10554181091314e44b431efd10 Jan 22 16:26:24 crc kubenswrapper[5032]: I0122 16:26:24.450914 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"cc55079d-e847-4e8b-aa43-55391b8d7a8a","Type":"ContainerStarted","Data":"4b4cd1897b7e99375f5b3effb04880593110fc10554181091314e44b431efd10"} Jan 22 16:26:24 crc kubenswrapper[5032]: I0122 16:26:24.467783 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4be2c6b7-f91a-44a6-b698-9019bd5dd86a" path="/var/lib/kubelet/pods/4be2c6b7-f91a-44a6-b698-9019bd5dd86a/volumes" Jan 22 16:26:25 crc kubenswrapper[5032]: I0122 16:26:25.466917 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"cc55079d-e847-4e8b-aa43-55391b8d7a8a","Type":"ContainerStarted","Data":"390c5955209372ac25319003922253aed13481c16433308eca09d12c37a478b5"} Jan 22 16:26:25 crc kubenswrapper[5032]: I0122 16:26:25.467383 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:25 crc kubenswrapper[5032]: I0122 16:26:25.498875 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.498837272 podStartE2EDuration="2.498837272s" podCreationTimestamp="2026-01-22 16:26:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:25.493518748 +0000 UTC m=+1289.305687849" watchObservedRunningTime="2026-01-22 16:26:25.498837272 +0000 UTC m=+1289.311006323" Jan 22 16:26:30 crc kubenswrapper[5032]: I0122 16:26:30.575120 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 16:26:33 crc kubenswrapper[5032]: I0122 16:26:33.880173 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.236921 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.237463 5032 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="87fc46da-d927-4dff-9459-e5671ee0b0c4" containerName="kube-state-metrics" containerID="cri-o://a2a12d2f91247f8366f1a3665e2845e4952deef887313c793579236078e3cb22" gracePeriod=30 Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.405958 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-bh97r"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.407042 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.409633 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.412831 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.425466 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh97r"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.549063 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.549426 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.549447 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6rm5\" (UniqueName: \"kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.549489 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.593248 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.594510 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.596941 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.597491 5032 generic.go:334] "Generic (PLEG): container finished" podID="87fc46da-d927-4dff-9459-e5671ee0b0c4" containerID="a2a12d2f91247f8366f1a3665e2845e4952deef887313c793579236078e3cb22" exitCode=2 Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.597522 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"87fc46da-d927-4dff-9459-e5671ee0b0c4","Type":"ContainerDied","Data":"a2a12d2f91247f8366f1a3665e2845e4952deef887313c793579236078e3cb22"} Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.613278 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689447 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689525 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689625 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689653 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6rm5\" (UniqueName: \"kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689782 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689831 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.689973 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcmh7\" (UniqueName: \"kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7\") 
pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.762254 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.762265 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.762666 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.799403 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.799516 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6rm5\" (UniqueName: \"kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5\") pod \"nova-cell0-cell-mapping-bh97r\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.800437 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.800525 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.800566 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcmh7\" (UniqueName: \"kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.800935 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.804434 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.819108 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.820529 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.821477 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.828136 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcmh7\" (UniqueName: \"kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.829073 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.834923 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data\") pod \"nova-scheduler-0\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.870901 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.883336 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902463 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902498 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902523 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrmtr\" (UniqueName: \"kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902633 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902658 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902678 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902695 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjt24\" (UniqueName: \"kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.902735 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.910755 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.912819 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.916617 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.924119 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.939852 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:26:34 crc kubenswrapper[5032]: E0122 16:26:34.942670 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87fc46da-d927-4dff-9459-e5671ee0b0c4" containerName="kube-state-metrics" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.942689 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="87fc46da-d927-4dff-9459-e5671ee0b0c4" containerName="kube-state-metrics" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.942979 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="87fc46da-d927-4dff-9459-e5671ee0b0c4" containerName="kube-state-metrics" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.943887 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.948050 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.948455 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:34 crc kubenswrapper[5032]: I0122 16:26:34.949794 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.006999 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pb4dk\" (UniqueName: \"kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk\") pod \"87fc46da-d927-4dff-9459-e5671ee0b0c4\" (UID: \"87fc46da-d927-4dff-9459-e5671ee0b0c4\") " Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007743 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007814 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007835 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007888 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjt24\" (UniqueName: \"kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007928 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.007973 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008010 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008047 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7q2h\" (UniqueName: \"kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " 
pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008189 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008369 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008429 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008452 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008475 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008496 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrmtr\" (UniqueName: \"kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008499 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008523 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r894f\" (UniqueName: \"kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008581 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008623 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.008889 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.011108 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.011528 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk" (OuterVolumeSpecName: "kube-api-access-pb4dk") pod "87fc46da-d927-4dff-9459-e5671ee0b0c4" (UID: "87fc46da-d927-4dff-9459-e5671ee0b0c4"). InnerVolumeSpecName "kube-api-access-pb4dk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.017455 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.024155 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.024787 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.027380 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrmtr\" (UniqueName: \"kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr\") pod \"nova-api-0\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.027727 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjt24\" (UniqueName: \"kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24\") pod \"nova-metadata-0\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.029761 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.110727 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.110827 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112032 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112469 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112503 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112554 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7q2h\" (UniqueName: \"kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112578 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112651 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112668 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc 
kubenswrapper[5032]: I0122 16:26:35.112699 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r894f\" (UniqueName: \"kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.112760 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pb4dk\" (UniqueName: \"kubernetes.io/projected/87fc46da-d927-4dff-9459-e5671ee0b0c4-kube-api-access-pb4dk\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.114270 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.115139 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.116184 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.118165 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.118202 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.118238 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.132129 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r894f\" (UniqueName: \"kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f\") pod \"nova-cell1-novncproxy-0\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.138670 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7q2h\" (UniqueName: \"kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h\") pod 
\"dnsmasq-dns-bccf8f775-m6twj\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.198238 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.210373 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.242585 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.279617 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.395121 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8h9gw"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.397705 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.399821 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.405028 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.419800 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.419906 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79qg6\" (UniqueName: \"kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.419938 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.419964 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.428236 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8h9gw"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.448630 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:35 crc 
kubenswrapper[5032]: I0122 16:26:35.495393 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.522428 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.522565 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79qg6\" (UniqueName: \"kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.522602 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.522647 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.529762 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.530540 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.536553 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.545399 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79qg6\" (UniqueName: \"kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6\") pod \"nova-cell1-conductor-db-sync-8h9gw\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.595618 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh97r"] Jan 22 16:26:35 crc kubenswrapper[5032]: W0122 16:26:35.600453 5032 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66e893a9_0ee3_4c0e_98d7_5d85d75b46c3.slice/crio-3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c WatchSource:0}: Error finding container 3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c: Status 404 returned error can't find the container with id 3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.609515 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"26be7f12-9b5e-47fe-b897-422800b99608","Type":"ContainerStarted","Data":"dde762e8d14572ef389bb87cd99e85564c174e943a107cedb9c9bb436a111dd5"} Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.615943 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"87fc46da-d927-4dff-9459-e5671ee0b0c4","Type":"ContainerDied","Data":"54c10a8583aad587e9f429897e9ac602f31b11c38134f25ef67bd7bf1467b3e5"} Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.615981 5032 scope.go:117] "RemoveContainer" containerID="a2a12d2f91247f8366f1a3665e2845e4952deef887313c793579236078e3cb22" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.616022 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.663784 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.678940 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.694879 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.696357 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.697965 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.698122 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.716705 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.727599 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.727675 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dzxj\" (UniqueName: \"kubernetes.io/projected/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-api-access-4dzxj\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.727791 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.727851 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.784047 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.830011 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.830090 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.830171 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.830208 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dzxj\" (UniqueName: \"kubernetes.io/projected/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-api-access-4dzxj\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.839052 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.842113 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.842797 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4751594-9c2d-4f14-8812-8e01b22ab7ee-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: W0122 16:26:35.845733 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebcf61e5_f002_4991_a3c6_4d2d76a99078.slice/crio-9dab111c61de7c7fe653bf30b435a5add9a15f3e3fafca8bc0873b3060e7d4fa WatchSource:0}: Error finding container 9dab111c61de7c7fe653bf30b435a5add9a15f3e3fafca8bc0873b3060e7d4fa: Status 404 returned error can't find the container with id 9dab111c61de7c7fe653bf30b435a5add9a15f3e3fafca8bc0873b3060e7d4fa Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.845780 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 
16:26:35.852085 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dzxj\" (UniqueName: \"kubernetes.io/projected/c4751594-9c2d-4f14-8812-8e01b22ab7ee-kube-api-access-4dzxj\") pod \"kube-state-metrics-0\" (UID: \"c4751594-9c2d-4f14-8812-8e01b22ab7ee\") " pod="openstack/kube-state-metrics-0" Jan 22 16:26:35 crc kubenswrapper[5032]: I0122 16:26:35.858881 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.020098 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.039910 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.062988 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.259067 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8h9gw"] Jan 22 16:26:36 crc kubenswrapper[5032]: W0122 16:26:36.316326 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2acebf7_3c11_4c28_b0fc_980c3a05ab71.slice/crio-43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac WatchSource:0}: Error finding container 43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac: Status 404 returned error can't find the container with id 43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.350905 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.466106 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87fc46da-d927-4dff-9459-e5671ee0b0c4" path="/var/lib/kubelet/pods/87fc46da-d927-4dff-9459-e5671ee0b0c4/volumes" Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.626330 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerStarted","Data":"521dec4b1be170c3f7ae981adeace12d8e39a7857cdc868b54a8fc3ffb037fd9"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.629889 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh97r" event={"ID":"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3","Type":"ContainerStarted","Data":"bb2391b5fc691f89772bee8eb5d77072a576646b365bc2c9786db02ec1ad39b1"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.629922 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh97r" event={"ID":"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3","Type":"ContainerStarted","Data":"3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.632903 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4751594-9c2d-4f14-8812-8e01b22ab7ee","Type":"ContainerStarted","Data":"1a911b35625451cb4e6279d5583501dfe3314a7e0089efea3a1299e517d1ae25"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.634765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" 
event={"ID":"c2acebf7-3c11-4c28-b0fc-980c3a05ab71","Type":"ContainerStarted","Data":"a0584988e7977e4d36817b6c17073626e3a40a7be839a06231d9027ec0e7c06c"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.634857 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" event={"ID":"c2acebf7-3c11-4c28-b0fc-980c3a05ab71","Type":"ContainerStarted","Data":"43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.646563 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerStarted","Data":"a51cb0e975a39f533ebab1a78fd4a7a481e759c2dae82c09101fabf7533b2f27"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.649507 5032 generic.go:334] "Generic (PLEG): container finished" podID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerID="caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645" exitCode=0 Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.649638 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" event={"ID":"3d398643-4dc6-4bf4-9a8e-9e724e8593c3","Type":"ContainerDied","Data":"caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.649665 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" event={"ID":"3d398643-4dc6-4bf4-9a8e-9e724e8593c3","Type":"ContainerStarted","Data":"dfd66c034422e15437c081913cfa88781bbaf5ce5ae311f89aebf299910f85ec"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.651289 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ebcf61e5-f002-4991-a3c6-4d2d76a99078","Type":"ContainerStarted","Data":"9dab111c61de7c7fe653bf30b435a5add9a15f3e3fafca8bc0873b3060e7d4fa"} Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.652450 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-bh97r" podStartSLOduration=2.6524291939999998 podStartE2EDuration="2.652429194s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:36.645006556 +0000 UTC m=+1300.457175587" watchObservedRunningTime="2026-01-22 16:26:36.652429194 +0000 UTC m=+1300.464598225" Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.690645 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" podStartSLOduration=1.6906261919999999 podStartE2EDuration="1.690626192s" podCreationTimestamp="2026-01-22 16:26:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:36.681771736 +0000 UTC m=+1300.493940767" watchObservedRunningTime="2026-01-22 16:26:36.690626192 +0000 UTC m=+1300.502795223" Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.794541 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.794878 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-central-agent" 
containerID="cri-o://2b9e2810c7d9c12f1d964d0fd6db43be9ebb221bb858d3346f1e698625476856" gracePeriod=30 Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.794958 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="sg-core" containerID="cri-o://62df2d7637b61f3dae8efaa95a22955f75b13cb51ccf327f50199a2b1d506966" gracePeriod=30 Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.795021 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-notification-agent" containerID="cri-o://e63e5a5000dc288dd6631112a4dd75529a7bac60f75948ccfe92e734ffd3f3dd" gracePeriod=30 Jan 22 16:26:36 crc kubenswrapper[5032]: I0122 16:26:36.795007 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="proxy-httpd" containerID="cri-o://647f9b692ea4cdb152c150bd0d2a087661a2c16d3a3ce11fd048995a61e660f4" gracePeriod=30 Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.662983 5032 generic.go:334] "Generic (PLEG): container finished" podID="d8193f75-058b-4688-a37c-794e91abe580" containerID="647f9b692ea4cdb152c150bd0d2a087661a2c16d3a3ce11fd048995a61e660f4" exitCode=0 Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.663238 5032 generic.go:334] "Generic (PLEG): container finished" podID="d8193f75-058b-4688-a37c-794e91abe580" containerID="62df2d7637b61f3dae8efaa95a22955f75b13cb51ccf327f50199a2b1d506966" exitCode=2 Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.663249 5032 generic.go:334] "Generic (PLEG): container finished" podID="d8193f75-058b-4688-a37c-794e91abe580" containerID="2b9e2810c7d9c12f1d964d0fd6db43be9ebb221bb858d3346f1e698625476856" exitCode=0 Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.663058 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerDied","Data":"647f9b692ea4cdb152c150bd0d2a087661a2c16d3a3ce11fd048995a61e660f4"} Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.663464 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerDied","Data":"62df2d7637b61f3dae8efaa95a22955f75b13cb51ccf327f50199a2b1d506966"} Jan 22 16:26:37 crc kubenswrapper[5032]: I0122 16:26:37.663475 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerDied","Data":"2b9e2810c7d9c12f1d964d0fd6db43be9ebb221bb858d3346f1e698625476856"} Jan 22 16:26:38 crc kubenswrapper[5032]: I0122 16:26:38.662124 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:26:38 crc kubenswrapper[5032]: I0122 16:26:38.672647 5032 generic.go:334] "Generic (PLEG): container finished" podID="d8193f75-058b-4688-a37c-794e91abe580" containerID="e63e5a5000dc288dd6631112a4dd75529a7bac60f75948ccfe92e734ffd3f3dd" exitCode=0 Jan 22 16:26:38 crc kubenswrapper[5032]: I0122 16:26:38.672816 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerDied","Data":"e63e5a5000dc288dd6631112a4dd75529a7bac60f75948ccfe92e734ffd3f3dd"} Jan 22 16:26:38 crc kubenswrapper[5032]: I0122 
16:26:38.694095 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.280671 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.430504 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kv79\" (UniqueName: \"kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.430894 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.431033 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.431068 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.431100 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.431125 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.431192 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml\") pod \"d8193f75-058b-4688-a37c-794e91abe580\" (UID: \"d8193f75-058b-4688-a37c-794e91abe580\") " Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.432039 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.432260 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.435336 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79" (OuterVolumeSpecName: "kube-api-access-6kv79") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "kube-api-access-6kv79". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.438139 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts" (OuterVolumeSpecName: "scripts") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.479223 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.527977 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533229 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kv79\" (UniqueName: \"kubernetes.io/projected/d8193f75-058b-4688-a37c-794e91abe580-kube-api-access-6kv79\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533258 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533268 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533276 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8193f75-058b-4688-a37c-794e91abe580-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533285 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.533295 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.564668 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data" (OuterVolumeSpecName: "config-data") pod "d8193f75-058b-4688-a37c-794e91abe580" (UID: "d8193f75-058b-4688-a37c-794e91abe580"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.635279 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8193f75-058b-4688-a37c-794e91abe580-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.682793 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ebcf61e5-f002-4991-a3c6-4d2d76a99078","Type":"ContainerStarted","Data":"391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.682937 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691" gracePeriod=30 Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.685000 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerStarted","Data":"59c9eddde67294a68f55927df095250cf658786eb76499fc8a83d0808e4d738d"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.685093 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerStarted","Data":"cd38915d2f00fb4fc13e43a27a7fa107f84a31d1befa3bd9f982e806422c8db8"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.691558 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8193f75-058b-4688-a37c-794e91abe580","Type":"ContainerDied","Data":"107aecfb85772f17fa65261e5607b7042b52ae81aa3c395ad3b38296a90c6df1"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.691625 5032 scope.go:117] "RemoveContainer" containerID="647f9b692ea4cdb152c150bd0d2a087661a2c16d3a3ce11fd048995a61e660f4" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.691566 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.693549 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4751594-9c2d-4f14-8812-8e01b22ab7ee","Type":"ContainerStarted","Data":"3cd6e9fec24ddd4a16b6780bc142b2c53fcffc9ac02a7a15dde9eede358eaf71"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.694298 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.699105 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerStarted","Data":"187319375ed35002597711ca0119b2da3b8fcd449223b50013a7ca5972623cb6"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.699156 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerStarted","Data":"4511510e16a9f62fcbbb95875e87a73ed8b3f90e3f0c2a40044dd406b2fb8650"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.699290 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-log" containerID="cri-o://4511510e16a9f62fcbbb95875e87a73ed8b3f90e3f0c2a40044dd406b2fb8650" gracePeriod=30 Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.699381 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-metadata" containerID="cri-o://187319375ed35002597711ca0119b2da3b8fcd449223b50013a7ca5972623cb6" gracePeriod=30 Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.705823 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" event={"ID":"3d398643-4dc6-4bf4-9a8e-9e724e8593c3","Type":"ContainerStarted","Data":"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.706670 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.711889 5032 scope.go:117] "RemoveContainer" containerID="62df2d7637b61f3dae8efaa95a22955f75b13cb51ccf327f50199a2b1d506966" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.712194 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"26be7f12-9b5e-47fe-b897-422800b99608","Type":"ContainerStarted","Data":"ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935"} Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.713952 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.5573965210000003 podStartE2EDuration="5.713940619s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="2026-01-22 16:26:35.850065414 +0000 UTC m=+1299.662234445" lastFinishedPulling="2026-01-22 16:26:39.006609512 +0000 UTC m=+1302.818778543" observedRunningTime="2026-01-22 16:26:39.699752481 +0000 UTC m=+1303.511921512" watchObservedRunningTime="2026-01-22 16:26:39.713940619 +0000 UTC m=+1303.526109650" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.737128 5032 scope.go:117] "RemoveContainer" 
containerID="e63e5a5000dc288dd6631112a4dd75529a7bac60f75948ccfe92e734ffd3f3dd" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.745728 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.770514822 podStartE2EDuration="5.745707916s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="2026-01-22 16:26:36.037777178 +0000 UTC m=+1299.849946209" lastFinishedPulling="2026-01-22 16:26:39.012970262 +0000 UTC m=+1302.825139303" observedRunningTime="2026-01-22 16:26:39.724229013 +0000 UTC m=+1303.536398044" watchObservedRunningTime="2026-01-22 16:26:39.745707916 +0000 UTC m=+1303.557876947" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.751135 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.9302820179999998 podStartE2EDuration="4.75111681s" podCreationTimestamp="2026-01-22 16:26:35 +0000 UTC" firstStartedPulling="2026-01-22 16:26:36.364794406 +0000 UTC m=+1300.176963437" lastFinishedPulling="2026-01-22 16:26:37.185629198 +0000 UTC m=+1300.997798229" observedRunningTime="2026-01-22 16:26:39.743267291 +0000 UTC m=+1303.555436312" watchObservedRunningTime="2026-01-22 16:26:39.75111681 +0000 UTC m=+1303.563285841" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.775476 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.806784888 podStartE2EDuration="5.775448398s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="2026-01-22 16:26:36.037964213 +0000 UTC m=+1299.850133244" lastFinishedPulling="2026-01-22 16:26:39.006627723 +0000 UTC m=+1302.818796754" observedRunningTime="2026-01-22 16:26:39.762054341 +0000 UTC m=+1303.574223372" watchObservedRunningTime="2026-01-22 16:26:39.775448398 +0000 UTC m=+1303.587617449" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.781734 5032 scope.go:117] "RemoveContainer" containerID="2b9e2810c7d9c12f1d964d0fd6db43be9ebb221bb858d3346f1e698625476856" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.792048 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" podStartSLOduration=5.792030671 podStartE2EDuration="5.792030671s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:39.786170954 +0000 UTC m=+1303.598339985" watchObservedRunningTime="2026-01-22 16:26:39.792030671 +0000 UTC m=+1303.604199692" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.807766 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.830085 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.835691 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.338471415 podStartE2EDuration="5.835667764s" podCreationTimestamp="2026-01-22 16:26:34 +0000 UTC" firstStartedPulling="2026-01-22 16:26:35.495120832 +0000 UTC m=+1299.307289863" lastFinishedPulling="2026-01-22 16:26:38.992317181 +0000 UTC m=+1302.804486212" observedRunningTime="2026-01-22 16:26:39.817558251 +0000 UTC m=+1303.629727282" watchObservedRunningTime="2026-01-22 16:26:39.835667764 
+0000 UTC m=+1303.647836795" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.853993 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:39 crc kubenswrapper[5032]: E0122 16:26:39.854399 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-notification-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854416 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-notification-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: E0122 16:26:39.854448 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="sg-core" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854455 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="sg-core" Jan 22 16:26:39 crc kubenswrapper[5032]: E0122 16:26:39.854467 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-central-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854473 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-central-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: E0122 16:26:39.854484 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="proxy-httpd" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854490 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="proxy-httpd" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854656 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="proxy-httpd" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854673 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="sg-core" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854682 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-central-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.854693 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8193f75-058b-4688-a37c-794e91abe580" containerName="ceilometer-notification-agent" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.856366 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.861814 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.862064 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.862184 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.863201 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:39 crc kubenswrapper[5032]: I0122 16:26:39.949948 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049239 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049304 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049524 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049764 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049823 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049885 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4znnw\" (UniqueName: \"kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.049936 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.050079 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151008 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151063 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151113 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151176 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151195 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151214 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4znnw\" (UniqueName: \"kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151267 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.151310 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.152877 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 
16:26:40.155789 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.156923 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.157288 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.158187 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.159604 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.160460 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.175299 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4znnw\" (UniqueName: \"kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw\") pod \"ceilometer-0\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.211763 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.212103 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.251272 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.280448 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.493138 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8193f75-058b-4688-a37c-794e91abe580" path="/var/lib/kubelet/pods/d8193f75-058b-4688-a37c-794e91abe580/volumes" Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.727364 5032 generic.go:334] "Generic (PLEG): container finished" podID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerID="4511510e16a9f62fcbbb95875e87a73ed8b3f90e3f0c2a40044dd406b2fb8650" exitCode=143 Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.727454 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerDied","Data":"4511510e16a9f62fcbbb95875e87a73ed8b3f90e3f0c2a40044dd406b2fb8650"} Jan 22 16:26:40 crc kubenswrapper[5032]: I0122 16:26:40.744738 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:26:40 crc kubenswrapper[5032]: W0122 16:26:40.749931 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98ac7768_3f90_4ec2_ac29_11eafd105fe2.slice/crio-d2070d26ddce2773208466469e900636071918173e849e930d1ad3dcbfd9c177 WatchSource:0}: Error finding container d2070d26ddce2773208466469e900636071918173e849e930d1ad3dcbfd9c177: Status 404 returned error can't find the container with id d2070d26ddce2773208466469e900636071918173e849e930d1ad3dcbfd9c177 Jan 22 16:26:41 crc kubenswrapper[5032]: I0122 16:26:41.743700 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerStarted","Data":"0e5ba6d6e690cfe7b5f60321eb6f176f87444f23739db05aa9f75c8f78266a3f"} Jan 22 16:26:41 crc kubenswrapper[5032]: I0122 16:26:41.744331 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerStarted","Data":"d2070d26ddce2773208466469e900636071918173e849e930d1ad3dcbfd9c177"} Jan 22 16:26:42 crc kubenswrapper[5032]: I0122 16:26:42.757103 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerStarted","Data":"3e4e71c0e814d38794610c7dca4aecab7b48a71e6cdfdc5d739f48651eab462f"} Jan 22 16:26:43 crc kubenswrapper[5032]: I0122 16:26:43.772188 5032 generic.go:334] "Generic (PLEG): container finished" podID="66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" containerID="bb2391b5fc691f89772bee8eb5d77072a576646b365bc2c9786db02ec1ad39b1" exitCode=0 Jan 22 16:26:43 crc kubenswrapper[5032]: I0122 16:26:43.772578 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh97r" event={"ID":"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3","Type":"ContainerDied","Data":"bb2391b5fc691f89772bee8eb5d77072a576646b365bc2c9786db02ec1ad39b1"} Jan 22 16:26:43 crc kubenswrapper[5032]: I0122 16:26:43.775581 5032 generic.go:334] "Generic (PLEG): container finished" podID="c2acebf7-3c11-4c28-b0fc-980c3a05ab71" containerID="a0584988e7977e4d36817b6c17073626e3a40a7be839a06231d9027ec0e7c06c" exitCode=0 Jan 22 16:26:43 crc kubenswrapper[5032]: I0122 16:26:43.775642 5032 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" event={"ID":"c2acebf7-3c11-4c28-b0fc-980c3a05ab71","Type":"ContainerDied","Data":"a0584988e7977e4d36817b6c17073626e3a40a7be839a06231d9027ec0e7c06c"} Jan 22 16:26:43 crc kubenswrapper[5032]: I0122 16:26:43.780535 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerStarted","Data":"a1731ad3bc212d2aef48e58c7f6cfb829f1867142ce8e89f8ae70d19c356e506"} Jan 22 16:26:44 crc kubenswrapper[5032]: I0122 16:26:44.791830 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerStarted","Data":"0ee997e14bb4a85e6a2f7aee86b3f4dbc08db421cccedab0281f6c506fa2ff64"} Jan 22 16:26:44 crc kubenswrapper[5032]: I0122 16:26:44.793467 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:26:44 crc kubenswrapper[5032]: I0122 16:26:44.828628 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.226986254 podStartE2EDuration="5.828610427s" podCreationTimestamp="2026-01-22 16:26:39 +0000 UTC" firstStartedPulling="2026-01-22 16:26:40.7530447 +0000 UTC m=+1304.565213731" lastFinishedPulling="2026-01-22 16:26:44.354668853 +0000 UTC m=+1308.166837904" observedRunningTime="2026-01-22 16:26:44.815683823 +0000 UTC m=+1308.627852864" watchObservedRunningTime="2026-01-22 16:26:44.828610427 +0000 UTC m=+1308.640779458" Jan 22 16:26:44 crc kubenswrapper[5032]: I0122 16:26:44.948998 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 16:26:44 crc kubenswrapper[5032]: I0122 16:26:44.980590 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.199045 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.199373 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.244986 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.258248 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.265034 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267118 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data\") pod \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267201 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79qg6\" (UniqueName: \"kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6\") pod \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267251 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle\") pod \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267309 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle\") pod \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267327 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6rm5\" (UniqueName: \"kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5\") pod \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267355 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts\") pod \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267391 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts\") pod \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\" (UID: \"c2acebf7-3c11-4c28-b0fc-980c3a05ab71\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.267433 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data\") pod \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\" (UID: \"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3\") " Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.278179 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts" (OuterVolumeSpecName: "scripts") pod "c2acebf7-3c11-4c28-b0fc-980c3a05ab71" (UID: "c2acebf7-3c11-4c28-b0fc-980c3a05ab71"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.278349 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6" (OuterVolumeSpecName: "kube-api-access-79qg6") pod "c2acebf7-3c11-4c28-b0fc-980c3a05ab71" (UID: "c2acebf7-3c11-4c28-b0fc-980c3a05ab71"). InnerVolumeSpecName "kube-api-access-79qg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.327132 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts" (OuterVolumeSpecName: "scripts") pod "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" (UID: "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.354306 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5" (OuterVolumeSpecName: "kube-api-access-b6rm5") pod "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" (UID: "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3"). InnerVolumeSpecName "kube-api-access-b6rm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.368714 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" (UID: "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.368714 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2acebf7-3c11-4c28-b0fc-980c3a05ab71" (UID: "c2acebf7-3c11-4c28-b0fc-980c3a05ab71"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.369970 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data" (OuterVolumeSpecName: "config-data") pod "c2acebf7-3c11-4c28-b0fc-980c3a05ab71" (UID: "c2acebf7-3c11-4c28-b0fc-980c3a05ab71"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371031 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371053 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79qg6\" (UniqueName: \"kubernetes.io/projected/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-kube-api-access-79qg6\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371065 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371074 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371083 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6rm5\" (UniqueName: \"kubernetes.io/projected/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-kube-api-access-b6rm5\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371091 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.371099 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2acebf7-3c11-4c28-b0fc-980c3a05ab71-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.378961 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data" (OuterVolumeSpecName: "config-data") pod "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" (UID: "66e893a9-0ee3-4c0e-98d7-5d85d75b46c3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.403643 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.403889 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="dnsmasq-dns" containerID="cri-o://ee7960c8547486c961e372b3a389acdc7d5ba27372e55f58797f611a451c97c5" gracePeriod=10 Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.479814 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.801956 5032 generic.go:334] "Generic (PLEG): container finished" podID="3f47d550-9337-412b-be06-1034c44509c9" containerID="ee7960c8547486c961e372b3a389acdc7d5ba27372e55f58797f611a451c97c5" exitCode=0 Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.802047 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" event={"ID":"3f47d550-9337-412b-be06-1034c44509c9","Type":"ContainerDied","Data":"ee7960c8547486c961e372b3a389acdc7d5ba27372e55f58797f611a451c97c5"} Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.804964 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh97r" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.804977 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh97r" event={"ID":"66e893a9-0ee3-4c0e-98d7-5d85d75b46c3","Type":"ContainerDied","Data":"3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c"} Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.805009 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3636f75de8b8434c2f7b92c2eb18f2e6644208229bf0d618b0ad77d445abfd5c" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.809052 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.813495 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8h9gw" event={"ID":"c2acebf7-3c11-4c28-b0fc-980c3a05ab71","Type":"ContainerDied","Data":"43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac"} Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.813546 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43f2d83439f512a9012b6bcd8198b2a80a12a3c02be1cdeeb2d7fcde7a049bac" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.856521 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.894794 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 16:26:45 crc kubenswrapper[5032]: E0122 16:26:45.895276 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" containerName="nova-manage" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.895297 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" containerName="nova-manage" Jan 22 16:26:45 crc kubenswrapper[5032]: E0122 16:26:45.895339 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2acebf7-3c11-4c28-b0fc-980c3a05ab71" containerName="nova-cell1-conductor-db-sync" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.895348 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2acebf7-3c11-4c28-b0fc-980c3a05ab71" containerName="nova-cell1-conductor-db-sync" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.895545 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2acebf7-3c11-4c28-b0fc-980c3a05ab71" containerName="nova-cell1-conductor-db-sync" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.895571 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" containerName="nova-manage" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.896316 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.898774 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.940151 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 16:26:45 crc kubenswrapper[5032]: I0122 16:26:45.958084 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.029384 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.029897 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-log" containerID="cri-o://cd38915d2f00fb4fc13e43a27a7fa107f84a31d1befa3bd9f982e806422c8db8" gracePeriod=30 Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.030220 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-api" containerID="cri-o://59c9eddde67294a68f55927df095250cf658786eb76499fc8a83d0808e4d738d" gracePeriod=30 Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.034705 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": EOF" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.034726 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": EOF" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.066474 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091448 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091557 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091596 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091684 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091707 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld7cf\" (UniqueName: \"kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.091748 5032 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config\") pod \"3f47d550-9337-412b-be06-1034c44509c9\" (UID: \"3f47d550-9337-412b-be06-1034c44509c9\") " Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.092035 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.092105 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2bkd\" (UniqueName: \"kubernetes.io/projected/df5d0503-cd88-4501-9466-5e818158b87e-kube-api-access-g2bkd\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.092186 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.108044 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf" (OuterVolumeSpecName: "kube-api-access-ld7cf") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "kube-api-access-ld7cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.188601 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.195625 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.195764 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.195830 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2bkd\" (UniqueName: \"kubernetes.io/projected/df5d0503-cd88-4501-9466-5e818158b87e-kube-api-access-g2bkd\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.195912 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.195924 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld7cf\" (UniqueName: \"kubernetes.io/projected/3f47d550-9337-412b-be06-1034c44509c9-kube-api-access-ld7cf\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.197504 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config" (OuterVolumeSpecName: "config") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.204554 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.227150 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5d0503-cd88-4501-9466-5e818158b87e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.234931 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2bkd\" (UniqueName: \"kubernetes.io/projected/df5d0503-cd88-4501-9466-5e818158b87e-kube-api-access-g2bkd\") pod \"nova-cell1-conductor-0\" (UID: \"df5d0503-cd88-4501-9466-5e818158b87e\") " pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.238517 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.247052 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.249377 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3f47d550-9337-412b-be06-1034c44509c9" (UID: "3f47d550-9337-412b-be06-1034c44509c9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.273272 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.299949 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.299982 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.299992 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.300004 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f47d550-9337-412b-be06-1034c44509c9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.476619 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.780880 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.816008 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"df5d0503-cd88-4501-9466-5e818158b87e","Type":"ContainerStarted","Data":"b3777f8fc9618a597bba6339fe123e3abe51659e6e419ab3ee3e5379f80c829a"} Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.818110 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" event={"ID":"3f47d550-9337-412b-be06-1034c44509c9","Type":"ContainerDied","Data":"bcc0feac34974df6bfcfd803062b03af93d586b6bed124963421fef53af00427"} Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.818149 5032 scope.go:117] "RemoveContainer" containerID="ee7960c8547486c961e372b3a389acdc7d5ba27372e55f58797f611a451c97c5" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.818317 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-84wwk" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.824465 5032 generic.go:334] "Generic (PLEG): container finished" podID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerID="cd38915d2f00fb4fc13e43a27a7fa107f84a31d1befa3bd9f982e806422c8db8" exitCode=143 Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.825471 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerDied","Data":"cd38915d2f00fb4fc13e43a27a7fa107f84a31d1befa3bd9f982e806422c8db8"} Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.907725 5032 scope.go:117] "RemoveContainer" containerID="be08ce8794a867ea080097dc3b1577e11decd52009750084d34cbe6207491f79" Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.927590 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:26:46 crc kubenswrapper[5032]: I0122 16:26:46.942794 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-84wwk"] Jan 22 16:26:47 crc kubenswrapper[5032]: I0122 16:26:47.839289 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"df5d0503-cd88-4501-9466-5e818158b87e","Type":"ContainerStarted","Data":"1fb8f80b3ef1cb086b84763747814ed469820b0906151417cf565237f510934d"} Jan 22 16:26:47 crc kubenswrapper[5032]: I0122 16:26:47.839384 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="26be7f12-9b5e-47fe-b897-422800b99608" containerName="nova-scheduler-scheduler" containerID="cri-o://ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" gracePeriod=30 Jan 22 16:26:47 crc kubenswrapper[5032]: I0122 16:26:47.870820 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.870797657 podStartE2EDuration="2.870797657s" podCreationTimestamp="2026-01-22 16:26:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:47.857646876 +0000 UTC m=+1311.669815907" watchObservedRunningTime="2026-01-22 16:26:47.870797657 +0000 UTC m=+1311.682966688" Jan 22 16:26:48 crc kubenswrapper[5032]: I0122 16:26:48.464029 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f47d550-9337-412b-be06-1034c44509c9" path="/var/lib/kubelet/pods/3f47d550-9337-412b-be06-1034c44509c9/volumes" Jan 22 16:26:48 crc kubenswrapper[5032]: I0122 16:26:48.847191 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:49 crc kubenswrapper[5032]: E0122 16:26:49.951643 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 16:26:49 crc kubenswrapper[5032]: E0122 16:26:49.953317 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 16:26:49 crc kubenswrapper[5032]: E0122 16:26:49.955036 5032 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 16:26:49 crc kubenswrapper[5032]: E0122 16:26:49.955100 5032 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="26be7f12-9b5e-47fe-b897-422800b99608" containerName="nova-scheduler-scheduler" Jan 22 16:26:50 crc kubenswrapper[5032]: I0122 16:26:50.862477 5032 generic.go:334] "Generic (PLEG): container finished" podID="26be7f12-9b5e-47fe-b897-422800b99608" containerID="ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" exitCode=0 Jan 22 16:26:50 crc kubenswrapper[5032]: I0122 16:26:50.862527 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"26be7f12-9b5e-47fe-b897-422800b99608","Type":"ContainerDied","Data":"ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935"} Jan 22 16:26:50 crc kubenswrapper[5032]: I0122 16:26:50.995789 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.104966 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data\") pod \"26be7f12-9b5e-47fe-b897-422800b99608\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.105034 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle\") pod \"26be7f12-9b5e-47fe-b897-422800b99608\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.105085 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcmh7\" (UniqueName: \"kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7\") pod \"26be7f12-9b5e-47fe-b897-422800b99608\" (UID: \"26be7f12-9b5e-47fe-b897-422800b99608\") " Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.112031 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7" (OuterVolumeSpecName: "kube-api-access-xcmh7") pod "26be7f12-9b5e-47fe-b897-422800b99608" (UID: "26be7f12-9b5e-47fe-b897-422800b99608"). InnerVolumeSpecName "kube-api-access-xcmh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.136799 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "26be7f12-9b5e-47fe-b897-422800b99608" (UID: "26be7f12-9b5e-47fe-b897-422800b99608"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.142222 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data" (OuterVolumeSpecName: "config-data") pod "26be7f12-9b5e-47fe-b897-422800b99608" (UID: "26be7f12-9b5e-47fe-b897-422800b99608"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.207016 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcmh7\" (UniqueName: \"kubernetes.io/projected/26be7f12-9b5e-47fe-b897-422800b99608-kube-api-access-xcmh7\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.207044 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.207054 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26be7f12-9b5e-47fe-b897-422800b99608-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.881722 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.881717 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"26be7f12-9b5e-47fe-b897-422800b99608","Type":"ContainerDied","Data":"dde762e8d14572ef389bb87cd99e85564c174e943a107cedb9c9bb436a111dd5"} Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.882224 5032 scope.go:117] "RemoveContainer" containerID="ab4dcaa253aa8058ff3ecb1e5e629beab0d57b36f6dae77fb749275b2abaa935" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.885009 5032 generic.go:334] "Generic (PLEG): container finished" podID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerID="59c9eddde67294a68f55927df095250cf658786eb76499fc8a83d0808e4d738d" exitCode=0 Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.885051 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerDied","Data":"59c9eddde67294a68f55927df095250cf658786eb76499fc8a83d0808e4d738d"} Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.919068 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.938054 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.951141 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:51 crc kubenswrapper[5032]: E0122 16:26:51.951554 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="init" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.951569 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="init" Jan 22 16:26:51 crc kubenswrapper[5032]: E0122 16:26:51.951583 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="dnsmasq-dns" Jan 22 16:26:51 crc 
kubenswrapper[5032]: I0122 16:26:51.951590 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="dnsmasq-dns" Jan 22 16:26:51 crc kubenswrapper[5032]: E0122 16:26:51.951598 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26be7f12-9b5e-47fe-b897-422800b99608" containerName="nova-scheduler-scheduler" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.951606 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="26be7f12-9b5e-47fe-b897-422800b99608" containerName="nova-scheduler-scheduler" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.951822 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f47d550-9337-412b-be06-1034c44509c9" containerName="dnsmasq-dns" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.951843 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="26be7f12-9b5e-47fe-b897-422800b99608" containerName="nova-scheduler-scheduler" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.952559 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.955114 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 16:26:51 crc kubenswrapper[5032]: I0122 16:26:51.965956 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.045308 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122021 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data\") pod \"02dafeaa-651a-4a16-a674-2a47fb1a929e\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122143 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs\") pod \"02dafeaa-651a-4a16-a674-2a47fb1a929e\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122243 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle\") pod \"02dafeaa-651a-4a16-a674-2a47fb1a929e\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122283 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrmtr\" (UniqueName: \"kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr\") pod \"02dafeaa-651a-4a16-a674-2a47fb1a929e\" (UID: \"02dafeaa-651a-4a16-a674-2a47fb1a929e\") " Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xgh6\" (UniqueName: \"kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122705 5032 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122728 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.122955 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs" (OuterVolumeSpecName: "logs") pod "02dafeaa-651a-4a16-a674-2a47fb1a929e" (UID: "02dafeaa-651a-4a16-a674-2a47fb1a929e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.133054 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr" (OuterVolumeSpecName: "kube-api-access-rrmtr") pod "02dafeaa-651a-4a16-a674-2a47fb1a929e" (UID: "02dafeaa-651a-4a16-a674-2a47fb1a929e"). InnerVolumeSpecName "kube-api-access-rrmtr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.144831 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data" (OuterVolumeSpecName: "config-data") pod "02dafeaa-651a-4a16-a674-2a47fb1a929e" (UID: "02dafeaa-651a-4a16-a674-2a47fb1a929e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.156458 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02dafeaa-651a-4a16-a674-2a47fb1a929e" (UID: "02dafeaa-651a-4a16-a674-2a47fb1a929e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.224286 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xgh6\" (UniqueName: \"kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.224646 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.224678 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.225237 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.225254 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02dafeaa-651a-4a16-a674-2a47fb1a929e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.225265 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02dafeaa-651a-4a16-a674-2a47fb1a929e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.225276 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrmtr\" (UniqueName: \"kubernetes.io/projected/02dafeaa-651a-4a16-a674-2a47fb1a929e-kube-api-access-rrmtr\") on node \"crc\" DevicePath \"\"" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.228142 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.228716 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.239312 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xgh6\" (UniqueName: \"kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6\") pod \"nova-scheduler-0\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.343632 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.494404 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26be7f12-9b5e-47fe-b897-422800b99608" path="/var/lib/kubelet/pods/26be7f12-9b5e-47fe-b897-422800b99608/volumes" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.848699 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.899726 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ce8e3b4d-99a8-43fe-8220-41a112314656","Type":"ContainerStarted","Data":"9a75a646dbbeeb133adbc620dd81345edb417c5138975c0a6013295327f84567"} Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.903604 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"02dafeaa-651a-4a16-a674-2a47fb1a929e","Type":"ContainerDied","Data":"521dec4b1be170c3f7ae981adeace12d8e39a7857cdc868b54a8fc3ffb037fd9"} Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.903706 5032 scope.go:117] "RemoveContainer" containerID="59c9eddde67294a68f55927df095250cf658786eb76499fc8a83d0808e4d738d" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.903636 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.957033 5032 scope.go:117] "RemoveContainer" containerID="cd38915d2f00fb4fc13e43a27a7fa107f84a31d1befa3bd9f982e806422c8db8" Jan 22 16:26:52 crc kubenswrapper[5032]: I0122 16:26:52.964633 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:52.991078 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.002020 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:53 crc kubenswrapper[5032]: E0122 16:26:53.002355 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-log" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.002366 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-log" Jan 22 16:26:53 crc kubenswrapper[5032]: E0122 16:26:53.002384 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-api" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.002390 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-api" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.002639 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-api" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.002674 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" containerName="nova-api-log" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.003515 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.017627 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.025597 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.148274 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.148350 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f98cn\" (UniqueName: \"kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.148466 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.148488 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.250451 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.250534 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f98cn\" (UniqueName: \"kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.250624 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.250646 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.251343 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " 
pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.258440 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.259358 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.270394 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f98cn\" (UniqueName: \"kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn\") pod \"nova-api-0\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.366220 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.848682 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:26:53 crc kubenswrapper[5032]: W0122 16:26:53.848717 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f8c8c0e_21c6_422c_8903_7a032b55812a.slice/crio-c99a75997884c333d4747fdcd6e7afccc694642e0f75c1f5705d331828006e64 WatchSource:0}: Error finding container c99a75997884c333d4747fdcd6e7afccc694642e0f75c1f5705d331828006e64: Status 404 returned error can't find the container with id c99a75997884c333d4747fdcd6e7afccc694642e0f75c1f5705d331828006e64 Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.916624 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerStarted","Data":"c99a75997884c333d4747fdcd6e7afccc694642e0f75c1f5705d331828006e64"} Jan 22 16:26:53 crc kubenswrapper[5032]: I0122 16:26:53.917878 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ce8e3b4d-99a8-43fe-8220-41a112314656","Type":"ContainerStarted","Data":"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2"} Jan 22 16:26:54 crc kubenswrapper[5032]: I0122 16:26:54.013147 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.013130981 podStartE2EDuration="3.013130981s" podCreationTimestamp="2026-01-22 16:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:54.009171816 +0000 UTC m=+1317.821340887" watchObservedRunningTime="2026-01-22 16:26:54.013130981 +0000 UTC m=+1317.825300012" Jan 22 16:26:54 crc kubenswrapper[5032]: I0122 16:26:54.466341 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02dafeaa-651a-4a16-a674-2a47fb1a929e" path="/var/lib/kubelet/pods/02dafeaa-651a-4a16-a674-2a47fb1a929e/volumes" Jan 22 16:26:54 crc kubenswrapper[5032]: I0122 16:26:54.932081 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerStarted","Data":"579673db9a7e610fd4d0170da1b5fc7678df1a08f85b871f5f895ce073e42f85"} Jan 22 16:26:54 crc kubenswrapper[5032]: I0122 16:26:54.932145 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerStarted","Data":"c606b25cf7342a6c0c3329adfea7b23f61ec6da9607a41b3d22c9ced78398c74"} Jan 22 16:26:54 crc kubenswrapper[5032]: I0122 16:26:54.955354 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.955334369 podStartE2EDuration="2.955334369s" podCreationTimestamp="2026-01-22 16:26:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:26:54.95012885 +0000 UTC m=+1318.762297891" watchObservedRunningTime="2026-01-22 16:26:54.955334369 +0000 UTC m=+1318.767503400" Jan 22 16:26:56 crc kubenswrapper[5032]: I0122 16:26:56.310593 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 22 16:26:57 crc kubenswrapper[5032]: I0122 16:26:57.344906 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 16:27:02 crc kubenswrapper[5032]: I0122 16:27:02.344425 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 16:27:02 crc kubenswrapper[5032]: I0122 16:27:02.384179 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 16:27:03 crc kubenswrapper[5032]: I0122 16:27:03.016660 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:27:03 crc kubenswrapper[5032]: I0122 16:27:03.016736 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:27:03 crc kubenswrapper[5032]: I0122 16:27:03.061471 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 16:27:03 crc kubenswrapper[5032]: I0122 16:27:03.366596 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:27:03 crc kubenswrapper[5032]: I0122 16:27:03.366635 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:27:04 crc kubenswrapper[5032]: I0122 16:27:04.407057 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:04 crc kubenswrapper[5032]: I0122 16:27:04.449038 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" probeResult="failure" output="Get 
\"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.111106 5032 generic.go:334] "Generic (PLEG): container finished" podID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerID="187319375ed35002597711ca0119b2da3b8fcd449223b50013a7ca5972623cb6" exitCode=137 Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.111283 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerDied","Data":"187319375ed35002597711ca0119b2da3b8fcd449223b50013a7ca5972623cb6"} Jan 22 16:27:10 crc kubenswrapper[5032]: E0122 16:27:10.112321 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebcf61e5_f002_4991_a3c6_4d2d76a99078.slice/crio-conmon-391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebcf61e5_f002_4991_a3c6_4d2d76a99078.slice/crio-391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691.scope\": RecentStats: unable to find data in memory cache]" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.113376 5032 generic.go:334] "Generic (PLEG): container finished" podID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" containerID="391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691" exitCode=137 Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.113402 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ebcf61e5-f002-4991-a3c6-4d2d76a99078","Type":"ContainerDied","Data":"391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691"} Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.258073 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.272086 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.325726 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.406917 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r894f\" (UniqueName: \"kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f\") pod \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.406970 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjt24\" (UniqueName: \"kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24\") pod \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.407057 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data\") pod \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.407149 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data\") pod \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.407185 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle\") pod \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.407212 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs\") pod \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\" (UID: \"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.407234 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle\") pod \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\" (UID: \"ebcf61e5-f002-4991-a3c6-4d2d76a99078\") " Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.408242 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs" (OuterVolumeSpecName: "logs") pod "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" (UID: "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.412523 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f" (OuterVolumeSpecName: "kube-api-access-r894f") pod "ebcf61e5-f002-4991-a3c6-4d2d76a99078" (UID: "ebcf61e5-f002-4991-a3c6-4d2d76a99078"). InnerVolumeSpecName "kube-api-access-r894f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.412570 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24" (OuterVolumeSpecName: "kube-api-access-sjt24") pod "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" (UID: "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e"). InnerVolumeSpecName "kube-api-access-sjt24". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.433028 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ebcf61e5-f002-4991-a3c6-4d2d76a99078" (UID: "ebcf61e5-f002-4991-a3c6-4d2d76a99078"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.434622 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data" (OuterVolumeSpecName: "config-data") pod "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" (UID: "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.447228 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" (UID: "150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.447352 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data" (OuterVolumeSpecName: "config-data") pod "ebcf61e5-f002-4991-a3c6-4d2d76a99078" (UID: "ebcf61e5-f002-4991-a3c6-4d2d76a99078"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509540 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r894f\" (UniqueName: \"kubernetes.io/projected/ebcf61e5-f002-4991-a3c6-4d2d76a99078-kube-api-access-r894f\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509579 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjt24\" (UniqueName: \"kubernetes.io/projected/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-kube-api-access-sjt24\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509589 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509599 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509608 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509618 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:10 crc kubenswrapper[5032]: I0122 16:27:10.509626 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebcf61e5-f002-4991-a3c6-4d2d76a99078-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.123749 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e","Type":"ContainerDied","Data":"a51cb0e975a39f533ebab1a78fd4a7a481e759c2dae82c09101fabf7533b2f27"} Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.123845 5032 scope.go:117] "RemoveContainer" containerID="187319375ed35002597711ca0119b2da3b8fcd449223b50013a7ca5972623cb6" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.123810 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.126169 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ebcf61e5-f002-4991-a3c6-4d2d76a99078","Type":"ContainerDied","Data":"9dab111c61de7c7fe653bf30b435a5add9a15f3e3fafca8bc0873b3060e7d4fa"} Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.126202 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.152750 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.168919 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.176331 5032 scope.go:117] "RemoveContainer" containerID="4511510e16a9f62fcbbb95875e87a73ed8b3f90e3f0c2a40044dd406b2fb8650" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.177910 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.189052 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.203972 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: E0122 16:27:11.204355 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-log" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204373 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-log" Jan 22 16:27:11 crc kubenswrapper[5032]: E0122 16:27:11.204382 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204390 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 16:27:11 crc kubenswrapper[5032]: E0122 16:27:11.204423 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-metadata" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204431 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-metadata" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204598 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-log" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204609 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" containerName="nova-metadata-metadata" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.204625 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.205524 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.209429 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.210196 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.221699 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.222878 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.226180 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.226396 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.227179 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.235696 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.240755 5032 scope.go:117] "RemoveContainer" containerID="391d21da41bd3d4885d249378d16083cd3a62e2575da39d4ef5bd1276b38b691" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.243603 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329478 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xvt7\" (UniqueName: \"kubernetes.io/projected/c7489d16-2342-49b9-b936-18f7847fb26e-kube-api-access-2xvt7\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329527 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329564 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329678 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329792 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329831 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.329934 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.330092 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.330166 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.330240 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slrwj\" (UniqueName: \"kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431313 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431374 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431407 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slrwj\" (UniqueName: \"kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431446 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xvt7\" (UniqueName: \"kubernetes.io/projected/c7489d16-2342-49b9-b936-18f7847fb26e-kube-api-access-2xvt7\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431468 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431492 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431512 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431542 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431561 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.431591 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.432488 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.436210 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.436298 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.436425 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.439303 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.439467 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.439568 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7489d16-2342-49b9-b936-18f7847fb26e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.439791 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.451021 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xvt7\" (UniqueName: \"kubernetes.io/projected/c7489d16-2342-49b9-b936-18f7847fb26e-kube-api-access-2xvt7\") pod \"nova-cell1-novncproxy-0\" (UID: \"c7489d16-2342-49b9-b936-18f7847fb26e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.451102 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slrwj\" (UniqueName: \"kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj\") pod \"nova-metadata-0\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.543836 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:11 crc kubenswrapper[5032]: I0122 16:27:11.554442 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:12 crc kubenswrapper[5032]: I0122 16:27:12.129032 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:12 crc kubenswrapper[5032]: I0122 16:27:12.139685 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 16:27:12 crc kubenswrapper[5032]: W0122 16:27:12.141557 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7489d16_2342_49b9_b936_18f7847fb26e.slice/crio-5b0d948213b686e781de6c1d53f05be9e472c3188371c2a0db45c2af354755c0 WatchSource:0}: Error finding container 5b0d948213b686e781de6c1d53f05be9e472c3188371c2a0db45c2af354755c0: Status 404 returned error can't find the container with id 5b0d948213b686e781de6c1d53f05be9e472c3188371c2a0db45c2af354755c0 Jan 22 16:27:12 crc kubenswrapper[5032]: I0122 16:27:12.464627 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e" path="/var/lib/kubelet/pods/150b5fd4-69b0-4bbe-a1b1-aacdc7e38e0e/volumes" Jan 22 16:27:12 crc kubenswrapper[5032]: I0122 16:27:12.465544 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebcf61e5-f002-4991-a3c6-4d2d76a99078" path="/var/lib/kubelet/pods/ebcf61e5-f002-4991-a3c6-4d2d76a99078/volumes" Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.154026 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerStarted","Data":"cf04f36add102f7cac258558524de49e83ed544592df4f0a512d08cf888e0f15"} Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.154369 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerStarted","Data":"7f95a307a6d46200e7e2d7ab557272968ff779f1028e08064ae6d9498ea82aac"} Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.156557 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c7489d16-2342-49b9-b936-18f7847fb26e","Type":"ContainerStarted","Data":"085f2bdb8dcc9077cbe1773f790ce9e1d392fadd41c1ccc96a6755e7763d268a"} Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.156583 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c7489d16-2342-49b9-b936-18f7847fb26e","Type":"ContainerStarted","Data":"5b0d948213b686e781de6c1d53f05be9e472c3188371c2a0db45c2af354755c0"} Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.182663 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.18264407 podStartE2EDuration="2.18264407s" podCreationTimestamp="2026-01-22 16:27:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:13.174074551 +0000 UTC m=+1336.986243582" watchObservedRunningTime="2026-01-22 16:27:13.18264407 +0000 UTC m=+1336.994813101" Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.370375 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.370968 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 16:27:13 crc 
kubenswrapper[5032]: I0122 16:27:13.371327 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 16:27:13 crc kubenswrapper[5032]: I0122 16:27:13.373993 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.173547 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerStarted","Data":"7efb84d1b82111866f026c33af2d9539a002bdb2acf94c0544340f74c3eecd1e"} Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.173606 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.184711 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.207715 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.207687126 podStartE2EDuration="3.207687126s" podCreationTimestamp="2026-01-22 16:27:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:14.195663135 +0000 UTC m=+1338.007832216" watchObservedRunningTime="2026-01-22 16:27:14.207687126 +0000 UTC m=+1338.019856197" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.383515 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.385010 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.412738 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.512535 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.512707 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.513129 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.513161 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mzbz\" (UniqueName: \"kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" 
(UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.513302 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.513475 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615044 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615084 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mzbz\" (UniqueName: \"kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615135 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615172 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615219 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.615264 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.616003 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " 
pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.616001 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.616512 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.616927 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.617031 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.653673 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mzbz\" (UniqueName: \"kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz\") pod \"dnsmasq-dns-cd5cbd7b9-cbh77\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:14 crc kubenswrapper[5032]: I0122 16:27:14.702693 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:15 crc kubenswrapper[5032]: I0122 16:27:15.229831 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:27:15 crc kubenswrapper[5032]: W0122 16:27:15.239072 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1f2adcd_427c_4135_a0a7_35e79338861d.slice/crio-136c83f3dddac64fc9f88602f1591b934bd1f6bc923e89f969757cfee706e63b WatchSource:0}: Error finding container 136c83f3dddac64fc9f88602f1591b934bd1f6bc923e89f969757cfee706e63b: Status 404 returned error can't find the container with id 136c83f3dddac64fc9f88602f1591b934bd1f6bc923e89f969757cfee706e63b Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.188483 5032 generic.go:334] "Generic (PLEG): container finished" podID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerID="99170e72eecb54f889a817187f985008214abb520678f64a17cee4ac5ea849fc" exitCode=0 Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.188561 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" event={"ID":"f1f2adcd-427c-4135-a0a7-35e79338861d","Type":"ContainerDied","Data":"99170e72eecb54f889a817187f985008214abb520678f64a17cee4ac5ea849fc"} Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.189043 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" event={"ID":"f1f2adcd-427c-4135-a0a7-35e79338861d","Type":"ContainerStarted","Data":"136c83f3dddac64fc9f88602f1591b934bd1f6bc923e89f969757cfee706e63b"} Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.546139 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.546513 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.552201 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.558038 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.591495 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.591758 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-central-agent" containerID="cri-o://0e5ba6d6e690cfe7b5f60321eb6f176f87444f23739db05aa9f75c8f78266a3f" gracePeriod=30 Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.591883 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="proxy-httpd" containerID="cri-o://0ee997e14bb4a85e6a2f7aee86b3f4dbc08db421cccedab0281f6c506fa2ff64" gracePeriod=30 Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.591942 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="sg-core" containerID="cri-o://a1731ad3bc212d2aef48e58c7f6cfb829f1867142ce8e89f8ae70d19c356e506" gracePeriod=30 Jan 22 16:27:16 crc kubenswrapper[5032]: I0122 16:27:16.591979 5032 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-notification-agent" containerID="cri-o://3e4e71c0e814d38794610c7dca4aecab7b48a71e6cdfdc5d739f48651eab462f" gracePeriod=30 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.204565 5032 generic.go:334] "Generic (PLEG): container finished" podID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerID="0ee997e14bb4a85e6a2f7aee86b3f4dbc08db421cccedab0281f6c506fa2ff64" exitCode=0 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.205223 5032 generic.go:334] "Generic (PLEG): container finished" podID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerID="a1731ad3bc212d2aef48e58c7f6cfb829f1867142ce8e89f8ae70d19c356e506" exitCode=2 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.205362 5032 generic.go:334] "Generic (PLEG): container finished" podID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerID="0e5ba6d6e690cfe7b5f60321eb6f176f87444f23739db05aa9f75c8f78266a3f" exitCode=0 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.204654 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerDied","Data":"0ee997e14bb4a85e6a2f7aee86b3f4dbc08db421cccedab0281f6c506fa2ff64"} Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.205587 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerDied","Data":"a1731ad3bc212d2aef48e58c7f6cfb829f1867142ce8e89f8ae70d19c356e506"} Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.205607 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerDied","Data":"0e5ba6d6e690cfe7b5f60321eb6f176f87444f23739db05aa9f75c8f78266a3f"} Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.208923 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" event={"ID":"f1f2adcd-427c-4135-a0a7-35e79338861d","Type":"ContainerStarted","Data":"c4b16a7ac5e338cb6003d559f293b70da8617aaa35e4225a9f7531fe20a90e08"} Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.209015 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.209459 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" containerID="cri-o://579673db9a7e610fd4d0170da1b5fc7678df1a08f85b871f5f895ce073e42f85" gracePeriod=30 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.209689 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" containerID="cri-o://c606b25cf7342a6c0c3329adfea7b23f61ec6da9607a41b3d22c9ced78398c74" gracePeriod=30 Jan 22 16:27:17 crc kubenswrapper[5032]: I0122 16:27:17.235196 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" podStartSLOduration=3.235179024 podStartE2EDuration="3.235179024s" podCreationTimestamp="2026-01-22 16:27:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-22 16:27:17.234021783 +0000 UTC m=+1341.046190824" watchObservedRunningTime="2026-01-22 16:27:17.235179024 +0000 UTC m=+1341.047348055" Jan 22 16:27:18 crc kubenswrapper[5032]: I0122 16:27:18.220273 5032 generic.go:334] "Generic (PLEG): container finished" podID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerID="c606b25cf7342a6c0c3329adfea7b23f61ec6da9607a41b3d22c9ced78398c74" exitCode=143 Jan 22 16:27:18 crc kubenswrapper[5032]: I0122 16:27:18.220370 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerDied","Data":"c606b25cf7342a6c0c3329adfea7b23f61ec6da9607a41b3d22c9ced78398c74"} Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.230943 5032 generic.go:334] "Generic (PLEG): container finished" podID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerID="3e4e71c0e814d38794610c7dca4aecab7b48a71e6cdfdc5d739f48651eab462f" exitCode=0 Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.231284 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerDied","Data":"3e4e71c0e814d38794610c7dca4aecab7b48a71e6cdfdc5d739f48651eab462f"} Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.634350 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.751655 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.751725 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.752510 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.752608 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753097 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753172 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753254 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4znnw\" (UniqueName: \"kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753286 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753441 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts\") pod \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\" (UID: \"98ac7768-3f90-4ec2-ac29-11eafd105fe2\") " Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.753709 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.754336 5032 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.754359 5032 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98ac7768-3f90-4ec2-ac29-11eafd105fe2-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.760548 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw" (OuterVolumeSpecName: "kube-api-access-4znnw") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "kube-api-access-4znnw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.762173 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts" (OuterVolumeSpecName: "scripts") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.796047 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.839044 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.856160 5032 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.856195 5032 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.856209 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4znnw\" (UniqueName: \"kubernetes.io/projected/98ac7768-3f90-4ec2-ac29-11eafd105fe2-kube-api-access-4znnw\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.856225 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.862510 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data" (OuterVolumeSpecName: "config-data") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.862982 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98ac7768-3f90-4ec2-ac29-11eafd105fe2" (UID: "98ac7768-3f90-4ec2-ac29-11eafd105fe2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.958250 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:19 crc kubenswrapper[5032]: I0122 16:27:19.958283 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ac7768-3f90-4ec2-ac29-11eafd105fe2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.247793 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"98ac7768-3f90-4ec2-ac29-11eafd105fe2","Type":"ContainerDied","Data":"d2070d26ddce2773208466469e900636071918173e849e930d1ad3dcbfd9c177"} Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.249213 5032 scope.go:117] "RemoveContainer" containerID="0ee997e14bb4a85e6a2f7aee86b3f4dbc08db421cccedab0281f6c506fa2ff64" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.249463 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.308769 5032 scope.go:117] "RemoveContainer" containerID="a1731ad3bc212d2aef48e58c7f6cfb829f1867142ce8e89f8ae70d19c356e506" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.324463 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.330776 5032 scope.go:117] "RemoveContainer" containerID="3e4e71c0e814d38794610c7dca4aecab7b48a71e6cdfdc5d739f48651eab462f" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.335823 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.355808 5032 scope.go:117] "RemoveContainer" containerID="0e5ba6d6e690cfe7b5f60321eb6f176f87444f23739db05aa9f75c8f78266a3f" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.356685 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:20 crc kubenswrapper[5032]: E0122 16:27:20.357081 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-notification-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357104 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-notification-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: E0122 16:27:20.357120 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="sg-core" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357129 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="sg-core" Jan 22 16:27:20 crc kubenswrapper[5032]: E0122 16:27:20.357145 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="proxy-httpd" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357152 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="proxy-httpd" Jan 22 16:27:20 crc kubenswrapper[5032]: E0122 16:27:20.357189 5032 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-central-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357197 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-central-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357401 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-notification-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357427 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="ceilometer-central-agent" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357458 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="sg-core" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.357474 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" containerName="proxy-httpd" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.363773 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.367975 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.368176 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.368300 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.370404 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.466512 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98ac7768-3f90-4ec2-ac29-11eafd105fe2" path="/var/lib/kubelet/pods/98ac7768-3f90-4ec2-ac29-11eafd105fe2/volumes" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483705 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7gtl\" (UniqueName: \"kubernetes.io/projected/89f96057-2096-47e7-88c3-bdeab76e4fe6-kube-api-access-h7gtl\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483752 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483810 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-config-data\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483840 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483869 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-scripts\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483923 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-log-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483963 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-run-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.483992 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.586001 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.586107 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-config-data\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.587329 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.587905 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-scripts\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588045 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-log-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588225 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-run-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588330 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588462 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7gtl\" (UniqueName: \"kubernetes.io/projected/89f96057-2096-47e7-88c3-bdeab76e4fe6-kube-api-access-h7gtl\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588741 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-log-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.588962 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89f96057-2096-47e7-88c3-bdeab76e4fe6-run-httpd\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.600570 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.600570 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.600906 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.601315 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-scripts\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.602489 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f96057-2096-47e7-88c3-bdeab76e4fe6-config-data\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.609536 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7gtl\" (UniqueName: 
\"kubernetes.io/projected/89f96057-2096-47e7-88c3-bdeab76e4fe6-kube-api-access-h7gtl\") pod \"ceilometer-0\" (UID: \"89f96057-2096-47e7-88c3-bdeab76e4fe6\") " pod="openstack/ceilometer-0" Jan 22 16:27:20 crc kubenswrapper[5032]: I0122 16:27:20.707412 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.199425 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 16:27:21 crc kubenswrapper[5032]: W0122 16:27:21.209452 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89f96057_2096_47e7_88c3_bdeab76e4fe6.slice/crio-0bda4294b05b369baf21d2b7bbb04404179fdd49e9ec8f0dbedfdcf54e0e53cb WatchSource:0}: Error finding container 0bda4294b05b369baf21d2b7bbb04404179fdd49e9ec8f0dbedfdcf54e0e53cb: Status 404 returned error can't find the container with id 0bda4294b05b369baf21d2b7bbb04404179fdd49e9ec8f0dbedfdcf54e0e53cb Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.258277 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"0bda4294b05b369baf21d2b7bbb04404179fdd49e9ec8f0dbedfdcf54e0e53cb"} Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.545549 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.545595 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.555325 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:21 crc kubenswrapper[5032]: I0122 16:27:21.585284 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.282591 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.558148 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.558177 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.618069 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-tdfk2"] Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.619188 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.627343 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.627478 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.638711 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-tdfk2"] Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.728954 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v68l6\" (UniqueName: \"kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.728994 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.729057 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.729105 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.831204 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.831310 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v68l6\" (UniqueName: \"kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.831842 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.835231 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.836773 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.838392 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.858039 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v68l6\" (UniqueName: \"kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.866068 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts\") pod \"nova-cell1-cell-mapping-tdfk2\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:22 crc kubenswrapper[5032]: I0122 16:27:22.943354 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:23 crc kubenswrapper[5032]: I0122 16:27:23.367476 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": dial tcp 10.217.0.196:8774: connect: connection refused" Jan 22 16:27:23 crc kubenswrapper[5032]: I0122 16:27:23.367707 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": dial tcp 10.217.0.196:8774: connect: connection refused" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.315714 5032 generic.go:334] "Generic (PLEG): container finished" podID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerID="579673db9a7e610fd4d0170da1b5fc7678df1a08f85b871f5f895ce073e42f85" exitCode=0 Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.315966 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerDied","Data":"579673db9a7e610fd4d0170da1b5fc7678df1a08f85b871f5f895ce073e42f85"} Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.366467 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-tdfk2"] Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.545304 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.686622 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data\") pod \"9f8c8c0e-21c6-422c-8903-7a032b55812a\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.686808 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs\") pod \"9f8c8c0e-21c6-422c-8903-7a032b55812a\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.686883 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f98cn\" (UniqueName: \"kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn\") pod \"9f8c8c0e-21c6-422c-8903-7a032b55812a\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.686966 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle\") pod \"9f8c8c0e-21c6-422c-8903-7a032b55812a\" (UID: \"9f8c8c0e-21c6-422c-8903-7a032b55812a\") " Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.688922 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs" (OuterVolumeSpecName: "logs") pod "9f8c8c0e-21c6-422c-8903-7a032b55812a" (UID: "9f8c8c0e-21c6-422c-8903-7a032b55812a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.698968 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn" (OuterVolumeSpecName: "kube-api-access-f98cn") pod "9f8c8c0e-21c6-422c-8903-7a032b55812a" (UID: "9f8c8c0e-21c6-422c-8903-7a032b55812a"). InnerVolumeSpecName "kube-api-access-f98cn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.704090 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.716819 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data" (OuterVolumeSpecName: "config-data") pod "9f8c8c0e-21c6-422c-8903-7a032b55812a" (UID: "9f8c8c0e-21c6-422c-8903-7a032b55812a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.752481 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f8c8c0e-21c6-422c-8903-7a032b55812a" (UID: "9f8c8c0e-21c6-422c-8903-7a032b55812a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.759745 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.759982 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="dnsmasq-dns" containerID="cri-o://4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726" gracePeriod=10 Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.788958 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.788987 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f8c8c0e-21c6-422c-8903-7a032b55812a-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.788996 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f98cn\" (UniqueName: \"kubernetes.io/projected/9f8c8c0e-21c6-422c-8903-7a032b55812a-kube-api-access-f98cn\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:24 crc kubenswrapper[5032]: I0122 16:27:24.789006 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f8c8c0e-21c6-422c-8903-7a032b55812a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.160684 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295084 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295345 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295395 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295482 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7q2h\" (UniqueName: \"kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295524 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" 
(UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.295585 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc\") pod \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\" (UID: \"3d398643-4dc6-4bf4-9a8e-9e724e8593c3\") " Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.300284 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h" (OuterVolumeSpecName: "kube-api-access-h7q2h") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "kube-api-access-h7q2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.327724 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"e9b5756d752a24e4b261161f3ace04c3ef419cd353590da53b00b25061410f56"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.327774 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"867e3e7ca9ef86730c2cf37ce0362ea03d5fb5cad1cd93017b481be88b9b3fc7"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.345288 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9f8c8c0e-21c6-422c-8903-7a032b55812a","Type":"ContainerDied","Data":"c99a75997884c333d4747fdcd6e7afccc694642e0f75c1f5705d331828006e64"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.345367 5032 scope.go:117] "RemoveContainer" containerID="579673db9a7e610fd4d0170da1b5fc7678df1a08f85b871f5f895ce073e42f85" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.345436 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.348687 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config" (OuterVolumeSpecName: "config") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.352651 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.353249 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tdfk2" event={"ID":"e41215ab-724d-4efa-a10c-b1573c1bc3c7","Type":"ContainerStarted","Data":"0fe190f1cbe52c0ec01dc502c0056bc43e43d23391f6e62a0684eb31a5a28db7"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.353283 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tdfk2" event={"ID":"e41215ab-724d-4efa-a10c-b1573c1bc3c7","Type":"ContainerStarted","Data":"8ab2a080ddabff0d7f3fc9dcaf530530a112dac200a610e311f10103f7beda63"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.358507 5032 generic.go:334] "Generic (PLEG): container finished" podID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerID="4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726" exitCode=0 Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.358540 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" event={"ID":"3d398643-4dc6-4bf4-9a8e-9e724e8593c3","Type":"ContainerDied","Data":"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.358559 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" event={"ID":"3d398643-4dc6-4bf4-9a8e-9e724e8593c3","Type":"ContainerDied","Data":"dfd66c034422e15437c081913cfa88781bbaf5ce5ae311f89aebf299910f85ec"} Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.358602 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-m6twj" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.361698 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.373990 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.378076 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-tdfk2" podStartSLOduration=3.37805428 podStartE2EDuration="3.37805428s" podCreationTimestamp="2026-01-22 16:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:25.368885606 +0000 UTC m=+1349.181054647" watchObservedRunningTime="2026-01-22 16:27:25.37805428 +0000 UTC m=+1349.190223311" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.396337 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d398643-4dc6-4bf4-9a8e-9e724e8593c3" (UID: "3d398643-4dc6-4bf4-9a8e-9e724e8593c3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.397972 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.397994 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.398004 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.398013 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.398042 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7q2h\" (UniqueName: \"kubernetes.io/projected/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-kube-api-access-h7q2h\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.398051 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d398643-4dc6-4bf4-9a8e-9e724e8593c3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.488592 5032 scope.go:117] "RemoveContainer" containerID="c606b25cf7342a6c0c3329adfea7b23f61ec6da9607a41b3d22c9ced78398c74" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.488844 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.496431 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.528898 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.529541 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="dnsmasq-dns" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.529608 5032 
state_mem.go:107] "Deleted CPUSet assignment" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="dnsmasq-dns" Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.529672 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="init" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.529720 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="init" Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.529772 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.529873 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.529939 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.529997 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.530240 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" containerName="dnsmasq-dns" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.530304 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-log" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.530359 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" containerName="nova-api-api" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.531315 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.533327 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.534343 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.534472 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.539980 5032 scope.go:117] "RemoveContainer" containerID="4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.542186 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.579044 5032 scope.go:117] "RemoveContainer" containerID="caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.597048 5032 scope.go:117] "RemoveContainer" containerID="4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726" Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.597598 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726\": container with ID starting with 4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726 not found: ID does not exist" containerID="4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.597630 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726"} err="failed to get container status \"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726\": rpc error: code = NotFound desc = could not find container \"4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726\": container with ID starting with 4409967da9205edf41e833b8d490e3261737e24123c19f11834ad776aa711726 not found: ID does not exist" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.597649 5032 scope.go:117] "RemoveContainer" containerID="caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645" Jan 22 16:27:25 crc kubenswrapper[5032]: E0122 16:27:25.598570 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645\": container with ID starting with caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645 not found: ID does not exist" containerID="caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.598641 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645"} err="failed to get container status \"caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645\": rpc error: code = NotFound desc = could not find container \"caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645\": container with ID starting with caf30540e4bc020928f27882eec77ed40c39521148971ab6c64c7ca0485dd645 not found: ID does not exist" Jan 22 16:27:25 crc 
kubenswrapper[5032]: I0122 16:27:25.602048 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.602176 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.602307 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw2rs\" (UniqueName: \"kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.602453 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.602576 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.602749 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704542 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704619 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704708 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704738 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data\") pod \"nova-api-0\" (UID: 
\"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704772 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw2rs\" (UniqueName: \"kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.704825 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.705447 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.710114 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.710836 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.711999 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.717571 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.722342 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.722778 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw2rs\" (UniqueName: \"kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs\") pod \"nova-api-0\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " pod="openstack/nova-api-0" Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.729477 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-m6twj"] Jan 22 16:27:25 crc kubenswrapper[5032]: I0122 16:27:25.851832 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:26 crc kubenswrapper[5032]: I0122 16:27:26.323351 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:26 crc kubenswrapper[5032]: W0122 16:27:26.335596 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b4a7b78_f4fa_4ccc_8f15_e3837817f16a.slice/crio-536c8d95728a87cad7694389bcb48481d00e7fc7957fc950f728ac8a615dde3e WatchSource:0}: Error finding container 536c8d95728a87cad7694389bcb48481d00e7fc7957fc950f728ac8a615dde3e: Status 404 returned error can't find the container with id 536c8d95728a87cad7694389bcb48481d00e7fc7957fc950f728ac8a615dde3e Jan 22 16:27:26 crc kubenswrapper[5032]: I0122 16:27:26.379633 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"651205786b726a1a799e4828ccbac8e623d0f62e99897785de3ef3ce12fbf8e5"} Jan 22 16:27:26 crc kubenswrapper[5032]: I0122 16:27:26.385210 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerStarted","Data":"536c8d95728a87cad7694389bcb48481d00e7fc7957fc950f728ac8a615dde3e"} Jan 22 16:27:26 crc kubenswrapper[5032]: I0122 16:27:26.471090 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d398643-4dc6-4bf4-9a8e-9e724e8593c3" path="/var/lib/kubelet/pods/3d398643-4dc6-4bf4-9a8e-9e724e8593c3/volumes" Jan 22 16:27:26 crc kubenswrapper[5032]: I0122 16:27:26.471705 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f8c8c0e-21c6-422c-8903-7a032b55812a" path="/var/lib/kubelet/pods/9f8c8c0e-21c6-422c-8903-7a032b55812a/volumes" Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 16:27:27.395898 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"fd2d7dd74ce4e0bbc917839cf1d12ab66d0b8c91a2e5e84715d1c95c92ab886d"} Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 16:27:27.396379 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 16:27:27.399364 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerStarted","Data":"9229e1baf859dec7eb701e61172616c606ef4dabbe32630b824cf38eefcb8c5e"} Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 16:27:27.399396 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerStarted","Data":"1937c950e46ed1d98161a557653d35af9901715466a2a682b56bb5bb37d5fdaf"} Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 16:27:27.421382 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.577610516 podStartE2EDuration="7.421363821s" podCreationTimestamp="2026-01-22 16:27:20 +0000 UTC" firstStartedPulling="2026-01-22 16:27:21.21240072 +0000 UTC m=+1345.024569761" lastFinishedPulling="2026-01-22 16:27:27.056154045 +0000 UTC m=+1350.868323066" observedRunningTime="2026-01-22 16:27:27.415934426 +0000 UTC m=+1351.228103457" watchObservedRunningTime="2026-01-22 16:27:27.421363821 +0000 UTC m=+1351.233532852" Jan 22 16:27:27 crc kubenswrapper[5032]: I0122 
16:27:27.458803 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.4587800890000002 podStartE2EDuration="2.458780089s" podCreationTimestamp="2026-01-22 16:27:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:27.445806723 +0000 UTC m=+1351.257975754" watchObservedRunningTime="2026-01-22 16:27:27.458780089 +0000 UTC m=+1351.270949120" Jan 22 16:27:30 crc kubenswrapper[5032]: I0122 16:27:30.428073 5032 generic.go:334] "Generic (PLEG): container finished" podID="e41215ab-724d-4efa-a10c-b1573c1bc3c7" containerID="0fe190f1cbe52c0ec01dc502c0056bc43e43d23391f6e62a0684eb31a5a28db7" exitCode=0 Jan 22 16:27:30 crc kubenswrapper[5032]: I0122 16:27:30.428150 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tdfk2" event={"ID":"e41215ab-724d-4efa-a10c-b1573c1bc3c7","Type":"ContainerDied","Data":"0fe190f1cbe52c0ec01dc502c0056bc43e43d23391f6e62a0684eb31a5a28db7"} Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.564302 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.567331 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.576457 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.864818 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.922786 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts\") pod \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.922960 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle\") pod \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.922995 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data\") pod \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.923038 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v68l6\" (UniqueName: \"kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6\") pod \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\" (UID: \"e41215ab-724d-4efa-a10c-b1573c1bc3c7\") " Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.930078 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6" (OuterVolumeSpecName: "kube-api-access-v68l6") pod "e41215ab-724d-4efa-a10c-b1573c1bc3c7" (UID: "e41215ab-724d-4efa-a10c-b1573c1bc3c7"). 
InnerVolumeSpecName "kube-api-access-v68l6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.930070 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts" (OuterVolumeSpecName: "scripts") pod "e41215ab-724d-4efa-a10c-b1573c1bc3c7" (UID: "e41215ab-724d-4efa-a10c-b1573c1bc3c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.953616 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data" (OuterVolumeSpecName: "config-data") pod "e41215ab-724d-4efa-a10c-b1573c1bc3c7" (UID: "e41215ab-724d-4efa-a10c-b1573c1bc3c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:31 crc kubenswrapper[5032]: I0122 16:27:31.978034 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e41215ab-724d-4efa-a10c-b1573c1bc3c7" (UID: "e41215ab-724d-4efa-a10c-b1573c1bc3c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.024892 5032 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.024926 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.024941 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41215ab-724d-4efa-a10c-b1573c1bc3c7-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.024953 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v68l6\" (UniqueName: \"kubernetes.io/projected/e41215ab-724d-4efa-a10c-b1573c1bc3c7-kube-api-access-v68l6\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.450570 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tdfk2" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.451283 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tdfk2" event={"ID":"e41215ab-724d-4efa-a10c-b1573c1bc3c7","Type":"ContainerDied","Data":"8ab2a080ddabff0d7f3fc9dcaf530530a112dac200a610e311f10103f7beda63"} Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.451318 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ab2a080ddabff0d7f3fc9dcaf530530a112dac200a610e311f10103f7beda63" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.475774 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.654578 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.654806 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-log" containerID="cri-o://1937c950e46ed1d98161a557653d35af9901715466a2a682b56bb5bb37d5fdaf" gracePeriod=30 Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.654890 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-api" containerID="cri-o://9229e1baf859dec7eb701e61172616c606ef4dabbe32630b824cf38eefcb8c5e" gracePeriod=30 Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.685996 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.686200 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ce8e3b4d-99a8-43fe-8220-41a112314656" containerName="nova-scheduler-scheduler" containerID="cri-o://8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2" gracePeriod=30 Jan 22 16:27:32 crc kubenswrapper[5032]: I0122 16:27:32.742367 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.016884 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.016931 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.463941 5032 generic.go:334] "Generic (PLEG): container finished" podID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerID="9229e1baf859dec7eb701e61172616c606ef4dabbe32630b824cf38eefcb8c5e" exitCode=0 Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.464258 5032 generic.go:334] "Generic (PLEG): container finished" podID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerID="1937c950e46ed1d98161a557653d35af9901715466a2a682b56bb5bb37d5fdaf" exitCode=143 Jan 22 16:27:33 crc 
kubenswrapper[5032]: I0122 16:27:33.464100 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerDied","Data":"9229e1baf859dec7eb701e61172616c606ef4dabbe32630b824cf38eefcb8c5e"} Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.464350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerDied","Data":"1937c950e46ed1d98161a557653d35af9901715466a2a682b56bb5bb37d5fdaf"} Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.631208 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758520 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758690 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758736 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758774 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758885 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.758916 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dw2rs\" (UniqueName: \"kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs\") pod \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\" (UID: \"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a\") " Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.759456 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs" (OuterVolumeSpecName: "logs") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.764993 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs" (OuterVolumeSpecName: "kube-api-access-dw2rs") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "kube-api-access-dw2rs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.790968 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.793934 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data" (OuterVolumeSpecName: "config-data") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.825943 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.841191 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" (UID: "4b4a7b78-f4fa-4ccc-8f15-e3837817f16a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860733 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860766 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860775 5032 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860784 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860793 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dw2rs\" (UniqueName: \"kubernetes.io/projected/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-kube-api-access-dw2rs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:33 crc kubenswrapper[5032]: I0122 16:27:33.860802 5032 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.478227 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b4a7b78-f4fa-4ccc-8f15-e3837817f16a","Type":"ContainerDied","Data":"536c8d95728a87cad7694389bcb48481d00e7fc7957fc950f728ac8a615dde3e"} Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.478281 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.478740 5032 scope.go:117] "RemoveContainer" containerID="9229e1baf859dec7eb701e61172616c606ef4dabbe32630b824cf38eefcb8c5e" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.478402 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" containerID="cri-o://cf04f36add102f7cac258558524de49e83ed544592df4f0a512d08cf888e0f15" gracePeriod=30 Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.478465 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" containerID="cri-o://7efb84d1b82111866f026c33af2d9539a002bdb2acf94c0544340f74c3eecd1e" gracePeriod=30 Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.533609 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.537808 5032 scope.go:117] "RemoveContainer" containerID="1937c950e46ed1d98161a557653d35af9901715466a2a682b56bb5bb37d5fdaf" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.550459 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.559513 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:34 crc kubenswrapper[5032]: E0122 16:27:34.559956 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-api" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.559977 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-api" Jan 22 16:27:34 crc kubenswrapper[5032]: E0122 16:27:34.560009 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-log" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.560015 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-log" Jan 22 16:27:34 crc kubenswrapper[5032]: E0122 16:27:34.560024 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e41215ab-724d-4efa-a10c-b1573c1bc3c7" containerName="nova-manage" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.560030 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e41215ab-724d-4efa-a10c-b1573c1bc3c7" containerName="nova-manage" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.560217 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-log" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.560236 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" containerName="nova-api-api" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.560256 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="e41215ab-724d-4efa-a10c-b1573c1bc3c7" containerName="nova-manage" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.567630 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.571810 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.574154 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.574353 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.574565 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.677693 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-config-data\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.677767 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ed63ebf-4302-41ea-8f32-2536281097d5-logs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.677842 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.677963 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qwqt\" (UniqueName: \"kubernetes.io/projected/6ed63ebf-4302-41ea-8f32-2536281097d5-kube-api-access-6qwqt\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.678094 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.678204 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-public-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.781430 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.781565 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qwqt\" (UniqueName: \"kubernetes.io/projected/6ed63ebf-4302-41ea-8f32-2536281097d5-kube-api-access-6qwqt\") 
pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.781650 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.781754 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-public-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.782075 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-config-data\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.782131 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ed63ebf-4302-41ea-8f32-2536281097d5-logs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.782713 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ed63ebf-4302-41ea-8f32-2536281097d5-logs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.788197 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.790279 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-config-data\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.791494 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.798793 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed63ebf-4302-41ea-8f32-2536281097d5-public-tls-certs\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.799806 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qwqt\" (UniqueName: \"kubernetes.io/projected/6ed63ebf-4302-41ea-8f32-2536281097d5-kube-api-access-6qwqt\") pod \"nova-api-0\" (UID: \"6ed63ebf-4302-41ea-8f32-2536281097d5\") " pod="openstack/nova-api-0" Jan 
22 16:27:34 crc kubenswrapper[5032]: I0122 16:27:34.928391 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 16:27:35 crc kubenswrapper[5032]: I0122 16:27:35.500192 5032 generic.go:334] "Generic (PLEG): container finished" podID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerID="cf04f36add102f7cac258558524de49e83ed544592df4f0a512d08cf888e0f15" exitCode=143 Jan 22 16:27:35 crc kubenswrapper[5032]: I0122 16:27:35.500282 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerDied","Data":"cf04f36add102f7cac258558524de49e83ed544592df4f0a512d08cf888e0f15"} Jan 22 16:27:35 crc kubenswrapper[5032]: I0122 16:27:35.557777 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.468825 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b4a7b78-f4fa-4ccc-8f15-e3837817f16a" path="/var/lib/kubelet/pods/4b4a7b78-f4fa-4ccc-8f15-e3837817f16a/volumes" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.507186 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.528503 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6ed63ebf-4302-41ea-8f32-2536281097d5","Type":"ContainerStarted","Data":"c7346d742baa34ec2a594af8e3f6eb365b93033aee1a822d6adb62bbf3e45582"} Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.528540 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6ed63ebf-4302-41ea-8f32-2536281097d5","Type":"ContainerStarted","Data":"605a382fadfbd97aeedcde2834aa67abb9fe2f81a22e62fa33fc15ba6c514ca4"} Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.528550 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6ed63ebf-4302-41ea-8f32-2536281097d5","Type":"ContainerStarted","Data":"34d7f0ffd2c6d78a3a4d36aeea72a2e7913c4169ce74b85d20dbe3156b9904e6"} Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.530804 5032 generic.go:334] "Generic (PLEG): container finished" podID="ce8e3b4d-99a8-43fe-8220-41a112314656" containerID="8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2" exitCode=0 Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.530826 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ce8e3b4d-99a8-43fe-8220-41a112314656","Type":"ContainerDied","Data":"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2"} Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.530840 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ce8e3b4d-99a8-43fe-8220-41a112314656","Type":"ContainerDied","Data":"9a75a646dbbeeb133adbc620dd81345edb417c5138975c0a6013295327f84567"} Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.530873 5032 scope.go:117] "RemoveContainer" containerID="8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.530954 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.593440 5032 scope.go:117] "RemoveContainer" containerID="8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2" Jan 22 16:27:36 crc kubenswrapper[5032]: E0122 16:27:36.594578 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2\": container with ID starting with 8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2 not found: ID does not exist" containerID="8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.594674 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2"} err="failed to get container status \"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2\": rpc error: code = NotFound desc = could not find container \"8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2\": container with ID starting with 8fe3f0adf6dc2d4033b079c13dce43f520b0db4ede689b69fe4c2674d18d7bb2 not found: ID does not exist" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.621905 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle\") pod \"ce8e3b4d-99a8-43fe-8220-41a112314656\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.621986 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data\") pod \"ce8e3b4d-99a8-43fe-8220-41a112314656\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.622051 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xgh6\" (UniqueName: \"kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6\") pod \"ce8e3b4d-99a8-43fe-8220-41a112314656\" (UID: \"ce8e3b4d-99a8-43fe-8220-41a112314656\") " Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.632398 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.632378781 podStartE2EDuration="2.632378781s" podCreationTimestamp="2026-01-22 16:27:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:36.619900999 +0000 UTC m=+1360.432070030" watchObservedRunningTime="2026-01-22 16:27:36.632378781 +0000 UTC m=+1360.444547812" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.651114 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data" (OuterVolumeSpecName: "config-data") pod "ce8e3b4d-99a8-43fe-8220-41a112314656" (UID: "ce8e3b4d-99a8-43fe-8220-41a112314656"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.698086 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce8e3b4d-99a8-43fe-8220-41a112314656" (UID: "ce8e3b4d-99a8-43fe-8220-41a112314656"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.698202 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6" (OuterVolumeSpecName: "kube-api-access-4xgh6") pod "ce8e3b4d-99a8-43fe-8220-41a112314656" (UID: "ce8e3b4d-99a8-43fe-8220-41a112314656"). InnerVolumeSpecName "kube-api-access-4xgh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.729043 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.729083 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3b4d-99a8-43fe-8220-41a112314656-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.729093 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xgh6\" (UniqueName: \"kubernetes.io/projected/ce8e3b4d-99a8-43fe-8220-41a112314656-kube-api-access-4xgh6\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.872451 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.882999 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.893096 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:36 crc kubenswrapper[5032]: E0122 16:27:36.893423 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce8e3b4d-99a8-43fe-8220-41a112314656" containerName="nova-scheduler-scheduler" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.893443 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce8e3b4d-99a8-43fe-8220-41a112314656" containerName="nova-scheduler-scheduler" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.893641 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce8e3b4d-99a8-43fe-8220-41a112314656" containerName="nova-scheduler-scheduler" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.894224 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.900609 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 16:27:36 crc kubenswrapper[5032]: I0122 16:27:36.917752 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.033559 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-config-data\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.033842 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.034047 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl6tv\" (UniqueName: \"kubernetes.io/projected/0805eb42-14fe-4cb1-9766-e269515f7d80-kube-api-access-sl6tv\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.135937 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-config-data\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.136073 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.136143 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl6tv\" (UniqueName: \"kubernetes.io/projected/0805eb42-14fe-4cb1-9766-e269515f7d80-kube-api-access-sl6tv\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.140617 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-config-data\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.145763 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0805eb42-14fe-4cb1-9766-e269515f7d80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.154122 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl6tv\" (UniqueName: 
\"kubernetes.io/projected/0805eb42-14fe-4cb1-9766-e269515f7d80-kube-api-access-sl6tv\") pod \"nova-scheduler-0\" (UID: \"0805eb42-14fe-4cb1-9766-e269515f7d80\") " pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.212625 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.617101 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:40860->10.217.0.197:8775: read: connection reset by peer" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.617202 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:40858->10.217.0.197:8775: read: connection reset by peer" Jan 22 16:27:37 crc kubenswrapper[5032]: I0122 16:27:37.746699 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 16:27:37 crc kubenswrapper[5032]: W0122 16:27:37.800066 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0805eb42_14fe_4cb1_9766_e269515f7d80.slice/crio-18d4a9639e963c4499d6293256692ae16265b7de319261dc9423dbc4250e4957 WatchSource:0}: Error finding container 18d4a9639e963c4499d6293256692ae16265b7de319261dc9423dbc4250e4957: Status 404 returned error can't find the container with id 18d4a9639e963c4499d6293256692ae16265b7de319261dc9423dbc4250e4957 Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.490283 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce8e3b4d-99a8-43fe-8220-41a112314656" path="/var/lib/kubelet/pods/ce8e3b4d-99a8-43fe-8220-41a112314656/volumes" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.551263 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0805eb42-14fe-4cb1-9766-e269515f7d80","Type":"ContainerStarted","Data":"68a68eb8d4850dec84a6f192ea9cba80dda952dae76e8f91d969bc37c0ec19ff"} Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.551350 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0805eb42-14fe-4cb1-9766-e269515f7d80","Type":"ContainerStarted","Data":"18d4a9639e963c4499d6293256692ae16265b7de319261dc9423dbc4250e4957"} Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.560577 5032 generic.go:334] "Generic (PLEG): container finished" podID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerID="7efb84d1b82111866f026c33af2d9539a002bdb2acf94c0544340f74c3eecd1e" exitCode=0 Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.560637 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerDied","Data":"7efb84d1b82111866f026c33af2d9539a002bdb2acf94c0544340f74c3eecd1e"} Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.560676 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e","Type":"ContainerDied","Data":"7f95a307a6d46200e7e2d7ab557272968ff779f1028e08064ae6d9498ea82aac"} Jan 22 16:27:38 crc 
kubenswrapper[5032]: I0122 16:27:38.560698 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f95a307a6d46200e7e2d7ab557272968ff779f1028e08064ae6d9498ea82aac" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.586229 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.586202088 podStartE2EDuration="2.586202088s" podCreationTimestamp="2026-01-22 16:27:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:38.571986419 +0000 UTC m=+1362.384155480" watchObservedRunningTime="2026-01-22 16:27:38.586202088 +0000 UTC m=+1362.398371149" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.639511 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771075 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slrwj\" (UniqueName: \"kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj\") pod \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771173 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs\") pod \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771222 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs\") pod \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771280 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data\") pod \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771349 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle\") pod \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\" (UID: \"1eeaaff2-6bcd-4d32-a26a-5d5db88b905e\") " Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.771890 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs" (OuterVolumeSpecName: "logs") pod "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" (UID: "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.795240 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj" (OuterVolumeSpecName: "kube-api-access-slrwj") pod "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" (UID: "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e"). InnerVolumeSpecName "kube-api-access-slrwj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.806802 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data" (OuterVolumeSpecName: "config-data") pod "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" (UID: "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.809138 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" (UID: "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.857126 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" (UID: "1eeaaff2-6bcd-4d32-a26a-5d5db88b905e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.873613 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slrwj\" (UniqueName: \"kubernetes.io/projected/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-kube-api-access-slrwj\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.873663 5032 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.873676 5032 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.873687 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:38 crc kubenswrapper[5032]: I0122 16:27:38.873701 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.576335 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.628131 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.636443 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.682812 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:39 crc kubenswrapper[5032]: E0122 16:27:39.683430 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.683454 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" Jan 22 16:27:39 crc kubenswrapper[5032]: E0122 16:27:39.683518 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.683531 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.683907 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-log" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.683962 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" containerName="nova-metadata-metadata" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.685655 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.689255 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.689716 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.700598 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.794090 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125c93f8-f508-4bd7-a52e-b98f2c3212d6-logs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.794145 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.794166 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qfgz\" (UniqueName: \"kubernetes.io/projected/125c93f8-f508-4bd7-a52e-b98f2c3212d6-kube-api-access-6qfgz\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.794184 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.794284 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-config-data\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.896590 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125c93f8-f508-4bd7-a52e-b98f2c3212d6-logs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.896711 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.896757 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qfgz\" (UniqueName: \"kubernetes.io/projected/125c93f8-f508-4bd7-a52e-b98f2c3212d6-kube-api-access-6qfgz\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " 
pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.896797 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.897015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-config-data\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.899137 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125c93f8-f508-4bd7-a52e-b98f2c3212d6-logs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.903082 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.903804 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-config-data\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.904165 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125c93f8-f508-4bd7-a52e-b98f2c3212d6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:39 crc kubenswrapper[5032]: I0122 16:27:39.931289 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qfgz\" (UniqueName: \"kubernetes.io/projected/125c93f8-f508-4bd7-a52e-b98f2c3212d6-kube-api-access-6qfgz\") pod \"nova-metadata-0\" (UID: \"125c93f8-f508-4bd7-a52e-b98f2c3212d6\") " pod="openstack/nova-metadata-0" Jan 22 16:27:40 crc kubenswrapper[5032]: I0122 16:27:40.015513 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 16:27:40 crc kubenswrapper[5032]: I0122 16:27:40.474981 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eeaaff2-6bcd-4d32-a26a-5d5db88b905e" path="/var/lib/kubelet/pods/1eeaaff2-6bcd-4d32-a26a-5d5db88b905e/volumes" Jan 22 16:27:40 crc kubenswrapper[5032]: I0122 16:27:40.482769 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 16:27:40 crc kubenswrapper[5032]: W0122 16:27:40.486374 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod125c93f8_f508_4bd7_a52e_b98f2c3212d6.slice/crio-e8b42507fcc4602e8362051f61760bb1f2f524bc2c083957989b45582ec60f3b WatchSource:0}: Error finding container e8b42507fcc4602e8362051f61760bb1f2f524bc2c083957989b45582ec60f3b: Status 404 returned error can't find the container with id e8b42507fcc4602e8362051f61760bb1f2f524bc2c083957989b45582ec60f3b Jan 22 16:27:40 crc kubenswrapper[5032]: I0122 16:27:40.586235 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"125c93f8-f508-4bd7-a52e-b98f2c3212d6","Type":"ContainerStarted","Data":"e8b42507fcc4602e8362051f61760bb1f2f524bc2c083957989b45582ec60f3b"} Jan 22 16:27:41 crc kubenswrapper[5032]: I0122 16:27:41.607931 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"125c93f8-f508-4bd7-a52e-b98f2c3212d6","Type":"ContainerStarted","Data":"e69661da9f5ba398e7974396135d3e26854b42e8bdda5be7205626959a889b72"} Jan 22 16:27:41 crc kubenswrapper[5032]: I0122 16:27:41.608847 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"125c93f8-f508-4bd7-a52e-b98f2c3212d6","Type":"ContainerStarted","Data":"b8bdfc5e822d5246bd55ba5e8b4e4f1f71b3fdc2639baf88ad665222a1e8ad05"} Jan 22 16:27:41 crc kubenswrapper[5032]: I0122 16:27:41.637954 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.6379265309999997 podStartE2EDuration="2.637926531s" podCreationTimestamp="2026-01-22 16:27:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:27:41.630400381 +0000 UTC m=+1365.442569412" watchObservedRunningTime="2026-01-22 16:27:41.637926531 +0000 UTC m=+1365.450095572" Jan 22 16:27:42 crc kubenswrapper[5032]: I0122 16:27:42.214261 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 16:27:44 crc kubenswrapper[5032]: I0122 16:27:44.928794 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:27:44 crc kubenswrapper[5032]: I0122 16:27:44.929238 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 16:27:45 crc kubenswrapper[5032]: I0122 16:27:45.016047 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:27:45 crc kubenswrapper[5032]: I0122 16:27:45.016114 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 16:27:45 crc kubenswrapper[5032]: I0122 16:27:45.945064 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6ed63ebf-4302-41ea-8f32-2536281097d5" containerName="nova-api-log" probeResult="failure" output="Get 
\"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:45 crc kubenswrapper[5032]: I0122 16:27:45.945441 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6ed63ebf-4302-41ea-8f32-2536281097d5" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:47 crc kubenswrapper[5032]: I0122 16:27:47.215143 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 16:27:47 crc kubenswrapper[5032]: I0122 16:27:47.250035 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 16:27:47 crc kubenswrapper[5032]: I0122 16:27:47.702844 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 16:27:50 crc kubenswrapper[5032]: I0122 16:27:50.016473 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 16:27:50 crc kubenswrapper[5032]: I0122 16:27:50.017459 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 16:27:50 crc kubenswrapper[5032]: I0122 16:27:50.716173 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 16:27:51 crc kubenswrapper[5032]: I0122 16:27:51.031063 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="125c93f8-f508-4bd7-a52e-b98f2c3212d6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:51 crc kubenswrapper[5032]: I0122 16:27:51.031077 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="125c93f8-f508-4bd7-a52e-b98f2c3212d6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 16:27:54 crc kubenswrapper[5032]: I0122 16:27:54.940681 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 16:27:54 crc kubenswrapper[5032]: I0122 16:27:54.941821 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 16:27:54 crc kubenswrapper[5032]: I0122 16:27:54.943596 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 16:27:54 crc kubenswrapper[5032]: I0122 16:27:54.953954 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 16:27:55 crc kubenswrapper[5032]: I0122 16:27:55.732307 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 16:27:55 crc kubenswrapper[5032]: I0122 16:27:55.744538 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 16:28:00 crc kubenswrapper[5032]: I0122 16:28:00.029397 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 16:28:00 crc kubenswrapper[5032]: I0122 16:28:00.034566 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" 
Jan 22 16:28:00 crc kubenswrapper[5032]: I0122 16:28:00.037668 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 16:28:00 crc kubenswrapper[5032]: I0122 16:28:00.809521 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.017215 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.017673 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.017733 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.018537 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.018842 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1" gracePeriod=600 Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.848603 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1" exitCode=0 Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.848715 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1"} Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.849069 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403"} Jan 22 16:28:03 crc kubenswrapper[5032]: I0122 16:28:03.849103 5032 scope.go:117] "RemoveContainer" containerID="0e07bad5ddfef4b3b16bdde98bfadfd138cd9c8946f3f1a8c285fa09c7eb7871" Jan 22 16:28:09 crc kubenswrapper[5032]: I0122 16:28:09.588512 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:10 crc kubenswrapper[5032]: I0122 16:28:10.300632 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:11 
crc kubenswrapper[5032]: I0122 16:28:11.219055 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.222239 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.248385 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.274563 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwhrx\" (UniqueName: \"kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.274638 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.274659 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.376076 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwhrx\" (UniqueName: \"kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.376140 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.376158 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.376599 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.376699 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content\") pod 
\"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.407815 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwhrx\" (UniqueName: \"kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx\") pod \"redhat-operators-gcpgh\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:11 crc kubenswrapper[5032]: I0122 16:28:11.539037 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:12 crc kubenswrapper[5032]: I0122 16:28:12.108897 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:12 crc kubenswrapper[5032]: W0122 16:28:12.133037 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33f75102_4d30_4a3d_99b9_0d2b930fe494.slice/crio-855bab15a966c18e540e151fe97c1a3e9ceccccb3bf0c016fdd83d47dacfff55 WatchSource:0}: Error finding container 855bab15a966c18e540e151fe97c1a3e9ceccccb3bf0c016fdd83d47dacfff55: Status 404 returned error can't find the container with id 855bab15a966c18e540e151fe97c1a3e9ceccccb3bf0c016fdd83d47dacfff55 Jan 22 16:28:12 crc kubenswrapper[5032]: I0122 16:28:12.936816 5032 generic.go:334] "Generic (PLEG): container finished" podID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerID="142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4" exitCode=0 Jan 22 16:28:12 crc kubenswrapper[5032]: I0122 16:28:12.936893 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerDied","Data":"142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4"} Jan 22 16:28:12 crc kubenswrapper[5032]: I0122 16:28:12.937091 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerStarted","Data":"855bab15a966c18e540e151fe97c1a3e9ceccccb3bf0c016fdd83d47dacfff55"} Jan 22 16:28:13 crc kubenswrapper[5032]: I0122 16:28:13.803287 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="rabbitmq" containerID="cri-o://7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0" gracePeriod=604796 Jan 22 16:28:14 crc kubenswrapper[5032]: I0122 16:28:14.618759 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="rabbitmq" containerID="cri-o://4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657" gracePeriod=604796 Jan 22 16:28:14 crc kubenswrapper[5032]: I0122 16:28:14.958438 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerStarted","Data":"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136"} Jan 22 16:28:16 crc kubenswrapper[5032]: I0122 16:28:16.983946 5032 generic.go:334] "Generic (PLEG): container finished" podID="33f75102-4d30-4a3d-99b9-0d2b930fe494" 
containerID="a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136" exitCode=0 Jan 22 16:28:16 crc kubenswrapper[5032]: I0122 16:28:16.984129 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerDied","Data":"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136"} Jan 22 16:28:17 crc kubenswrapper[5032]: I0122 16:28:17.996168 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerStarted","Data":"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4"} Jan 22 16:28:18 crc kubenswrapper[5032]: I0122 16:28:18.023240 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gcpgh" podStartSLOduration=2.5129455849999998 podStartE2EDuration="7.023224591s" podCreationTimestamp="2026-01-22 16:28:11 +0000 UTC" firstStartedPulling="2026-01-22 16:28:12.938824769 +0000 UTC m=+1396.750993800" lastFinishedPulling="2026-01-22 16:28:17.449103755 +0000 UTC m=+1401.261272806" observedRunningTime="2026-01-22 16:28:18.016196563 +0000 UTC m=+1401.828365594" watchObservedRunningTime="2026-01-22 16:28:18.023224591 +0000 UTC m=+1401.835393622" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.424947 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.606836 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.606917 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.606964 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.606989 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607030 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607122 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607165 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8q2f\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607226 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607290 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607321 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607341 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\" (UID: \"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b\") " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607567 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607721 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607829 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.607848 5032 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.608022 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.615185 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.618230 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info" (OuterVolumeSpecName: "pod-info") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.631964 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.645382 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data" (OuterVolumeSpecName: "config-data") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.650128 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.663028 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f" (OuterVolumeSpecName: "kube-api-access-j8q2f") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "kube-api-access-j8q2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.698397 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf" (OuterVolumeSpecName: "server-conf") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709340 5032 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709380 5032 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709391 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709423 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709435 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709447 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709457 5032 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.709469 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8q2f\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-kube-api-access-j8q2f\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.739172 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" (UID: "c5cc5e7f-418c-45c1-91b2-b994c5d47e1b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.739310 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.811162 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:20 crc kubenswrapper[5032]: I0122 16:28:20.811191 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.023874 5032 generic.go:334] "Generic (PLEG): container finished" podID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerID="7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0" exitCode=0 Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.023917 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerDied","Data":"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0"} Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.023943 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c5cc5e7f-418c-45c1-91b2-b994c5d47e1b","Type":"ContainerDied","Data":"19eff6f7ac235b07d05807e0b7a2b0ee3d992fdf3a8eba6e85788ebb0e6106c0"} Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.023960 5032 scope.go:117] "RemoveContainer" containerID="7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.023979 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.090020 5032 scope.go:117] "RemoveContainer" containerID="8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.098519 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.115140 5032 scope.go:117] "RemoveContainer" containerID="7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0" Jan 22 16:28:21 crc kubenswrapper[5032]: E0122 16:28:21.116601 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0\": container with ID starting with 7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0 not found: ID does not exist" containerID="7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.116645 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0"} err="failed to get container status \"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0\": rpc error: code = NotFound desc = could not find container \"7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0\": container with ID starting with 7304155b69e1e15b51fef23f12e558fe0eba2af572f781c1499f02772e9b21c0 not found: ID does not exist" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.116678 5032 scope.go:117] "RemoveContainer" containerID="8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496" Jan 22 16:28:21 crc kubenswrapper[5032]: E0122 16:28:21.119854 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496\": container with ID starting with 8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496 not found: ID does not exist" containerID="8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.119932 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496"} err="failed to get container status \"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496\": rpc error: code = NotFound desc = could not find container \"8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496\": container with ID starting with 8ad4ee0bb574bc4bb1cc577b0d9242643e57b30717749a3dc98513c7be7eb496 not found: ID does not exist" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.129244 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.158071 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:21 crc kubenswrapper[5032]: E0122 16:28:21.158600 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="rabbitmq" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.158624 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="rabbitmq" Jan 
22 16:28:21 crc kubenswrapper[5032]: E0122 16:28:21.158657 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="setup-container" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.158668 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="setup-container" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.158894 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" containerName="rabbitmq" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.160210 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.168574 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.168749 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-46qls" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.169232 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.169421 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.169553 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.169646 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.169758 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.193420 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.322840 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323054 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-config-data\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323201 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323333 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" 
(UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323458 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323569 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323669 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323783 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pq75\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-kube-api-access-8pq75\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323896 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.323989 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.324079 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.425806 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.425934 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.425953 5032 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.425987 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pq75\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-kube-api-access-8pq75\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426019 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426041 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426058 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426098 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426137 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-config-data\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426159 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426187 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426366 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: 
\"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426563 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.426671 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.428177 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.428722 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.428815 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-config-data\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.432061 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.432074 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.432139 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.433423 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.449981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pq75\" (UniqueName: 
\"kubernetes.io/projected/d91220a8-ad5e-4caf-872f-b4f7e059ae5a-kube-api-access-8pq75\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.477745 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d91220a8-ad5e-4caf-872f-b4f7e059ae5a\") " pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.505397 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.539298 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.540267 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:21 crc kubenswrapper[5032]: I0122 16:28:21.998258 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 16:28:22 crc kubenswrapper[5032]: I0122 16:28:22.045877 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d91220a8-ad5e-4caf-872f-b4f7e059ae5a","Type":"ContainerStarted","Data":"24184f5a0030a57941155424dfe12a3fa904660d56a24fa2719f64c41ee94a69"} Jan 22 16:28:22 crc kubenswrapper[5032]: I0122 16:28:22.463597 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5cc5e7f-418c-45c1-91b2-b994c5d47e1b" path="/var/lib/kubelet/pods/c5cc5e7f-418c-45c1-91b2-b994c5d47e1b/volumes" Jan 22 16:28:22 crc kubenswrapper[5032]: I0122 16:28:22.594031 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Jan 22 16:28:22 crc kubenswrapper[5032]: I0122 16:28:22.605975 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gcpgh" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="registry-server" probeResult="failure" output=< Jan 22 16:28:22 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:28:22 crc kubenswrapper[5032]: > Jan 22 16:28:23 crc kubenswrapper[5032]: I0122 16:28:23.964644 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.067173 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d91220a8-ad5e-4caf-872f-b4f7e059ae5a","Type":"ContainerStarted","Data":"5e35b6891d0aedb2e1e1917a411d86f35bc7db98b0da5efd11775c04ae734e7c"} Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.070621 5032 generic.go:334] "Generic (PLEG): container finished" podID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerID="4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657" exitCode=0 Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.070666 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerDied","Data":"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657"} Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.070692 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c40041cb-9206-4776-98ee-30a6c1cf5a85","Type":"ContainerDied","Data":"b41e42ee33612162f8ca368f621b9b93c6a71cd227ab4a6ecd2dadcb86aeccda"} Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.070710 5032 scope.go:117] "RemoveContainer" containerID="4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.070825 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084088 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084157 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084183 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084218 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084243 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084269 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084322 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084350 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtk9n\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084461 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084521 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.084568 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data\") pod \"c40041cb-9206-4776-98ee-30a6c1cf5a85\" (UID: \"c40041cb-9206-4776-98ee-30a6c1cf5a85\") " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.085117 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.085314 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.085619 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.089726 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.091155 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n" (OuterVolumeSpecName: "kube-api-access-vtk9n") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "kube-api-access-vtk9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.093483 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info" (OuterVolumeSpecName: "pod-info") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.093571 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.107109 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.109133 5032 scope.go:117] "RemoveContainer" containerID="4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.139345 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data" (OuterVolumeSpecName: "config-data") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.141594 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf" (OuterVolumeSpecName: "server-conf") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187598 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187646 5032 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187658 5032 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c40041cb-9206-4776-98ee-30a6c1cf5a85-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187671 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtk9n\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-kube-api-access-vtk9n\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187697 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187711 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187725 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187737 5032 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c40041cb-9206-4776-98ee-30a6c1cf5a85-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187770 5032 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.187781 5032 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c40041cb-9206-4776-98ee-30a6c1cf5a85-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.201515 5032 scope.go:117] "RemoveContainer" containerID="4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657" Jan 22 16:28:24 crc kubenswrapper[5032]: E0122 16:28:24.202047 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657\": container with ID starting with 4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657 not found: ID does not exist" containerID="4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.202147 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657"} err="failed to get container status \"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657\": rpc error: code = NotFound desc = could not find container \"4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657\": container with ID starting with 4d3d9d7eea00a8ef0042680c8702f8d36d52bc3e6f3fd664141c62bd2c33d657 not found: ID does not exist" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.202237 5032 scope.go:117] "RemoveContainer" containerID="4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76" Jan 22 16:28:24 crc kubenswrapper[5032]: E0122 16:28:24.202621 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76\": container with ID starting with 4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76 not found: ID does not exist" containerID="4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.202678 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76"} err="failed to get container status \"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76\": rpc error: code = NotFound desc = could not find container \"4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76\": container with ID starting with 4481b4c4889f1c1a677cc770de66341b21fea63cc75e131c654a00d61a8e6f76 not found: ID does not exist" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.220825 5032 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.226957 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c40041cb-9206-4776-98ee-30a6c1cf5a85" (UID: "c40041cb-9206-4776-98ee-30a6c1cf5a85"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.289335 5032 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.289367 5032 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c40041cb-9206-4776-98ee-30a6c1cf5a85-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.402354 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.416885 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.427576 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:24 crc kubenswrapper[5032]: E0122 16:28:24.428067 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="setup-container" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.428094 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="setup-container" Jan 22 16:28:24 crc kubenswrapper[5032]: E0122 16:28:24.428125 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="rabbitmq" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.428134 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="rabbitmq" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.428379 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" containerName="rabbitmq" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.429485 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.431844 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.432007 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gskcv" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.432120 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.432163 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.432354 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.432497 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.433817 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.446002 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.466730 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c40041cb-9206-4776-98ee-30a6c1cf5a85" path="/var/lib/kubelet/pods/c40041cb-9206-4776-98ee-30a6c1cf5a85/volumes" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593226 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593323 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593355 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593715 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c0805a3-4319-469b-8a2a-e53998fd9166-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593847 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-tls\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.593964 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.594022 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c0805a3-4319-469b-8a2a-e53998fd9166-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.594123 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fcd4\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-kube-api-access-2fcd4\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.594188 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.594311 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.594378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696067 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696112 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696167 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c0805a3-4319-469b-8a2a-e53998fd9166-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696198 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696226 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696250 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c0805a3-4319-469b-8a2a-e53998fd9166-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696273 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fcd4\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-kube-api-access-2fcd4\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696292 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696330 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696350 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696380 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696525 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 
16:28:24.696944 5032 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.696970 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.697539 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.697764 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.697889 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9c0805a3-4319-469b-8a2a-e53998fd9166-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.701326 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.701585 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.702261 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9c0805a3-4319-469b-8a2a-e53998fd9166-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.707491 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9c0805a3-4319-469b-8a2a-e53998fd9166-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.715554 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fcd4\" (UniqueName: 
\"kubernetes.io/projected/9c0805a3-4319-469b-8a2a-e53998fd9166-kube-api-access-2fcd4\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.730607 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9c0805a3-4319-469b-8a2a-e53998fd9166\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:24 crc kubenswrapper[5032]: I0122 16:28:24.752147 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.158608 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.160067 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.169491 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.196110 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208009 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208059 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5fkd\" (UniqueName: \"kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208108 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208168 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208214 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208251 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.208270 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.262216 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312014 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312076 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5fkd\" (UniqueName: \"kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312116 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312188 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312241 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312276 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.312300 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " 
pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.313237 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.313817 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.314616 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.315574 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.315749 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.315755 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.337756 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5fkd\" (UniqueName: \"kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd\") pod \"dnsmasq-dns-d558885bc-927dv\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.508073 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:25 crc kubenswrapper[5032]: I0122 16:28:25.779284 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:25 crc kubenswrapper[5032]: W0122 16:28:25.784383 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf050dac2_3682_4f31_ae31_05c50f3fb957.slice/crio-2852ffbeac2515579baa12c9dc4d866845e23f61dbc1ce9bb5479c2201364b83 WatchSource:0}: Error finding container 2852ffbeac2515579baa12c9dc4d866845e23f61dbc1ce9bb5479c2201364b83: Status 404 returned error can't find the container with id 2852ffbeac2515579baa12c9dc4d866845e23f61dbc1ce9bb5479c2201364b83 Jan 22 16:28:26 crc kubenswrapper[5032]: I0122 16:28:26.090060 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-927dv" event={"ID":"f050dac2-3682-4f31-ae31-05c50f3fb957","Type":"ContainerStarted","Data":"2852ffbeac2515579baa12c9dc4d866845e23f61dbc1ce9bb5479c2201364b83"} Jan 22 16:28:26 crc kubenswrapper[5032]: I0122 16:28:26.091470 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c0805a3-4319-469b-8a2a-e53998fd9166","Type":"ContainerStarted","Data":"bdf935380f357c9d1beaa12315b783e0187059f14154b6d97bc5d51843a56a65"} Jan 22 16:28:27 crc kubenswrapper[5032]: I0122 16:28:27.102200 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c0805a3-4319-469b-8a2a-e53998fd9166","Type":"ContainerStarted","Data":"3ee129f53efba4e7615ec8a15990d0e4a9e56607561f9ab146a29a92f7552f2a"} Jan 22 16:28:27 crc kubenswrapper[5032]: I0122 16:28:27.105586 5032 generic.go:334] "Generic (PLEG): container finished" podID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerID="8e199d84d3c3f741a9ced87ced398ef7caff8d36210e5207960e4d69783f4e85" exitCode=0 Jan 22 16:28:27 crc kubenswrapper[5032]: I0122 16:28:27.105649 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-927dv" event={"ID":"f050dac2-3682-4f31-ae31-05c50f3fb957","Type":"ContainerDied","Data":"8e199d84d3c3f741a9ced87ced398ef7caff8d36210e5207960e4d69783f4e85"} Jan 22 16:28:28 crc kubenswrapper[5032]: I0122 16:28:28.118507 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-927dv" event={"ID":"f050dac2-3682-4f31-ae31-05c50f3fb957","Type":"ContainerStarted","Data":"3c5fc3133973a633ee14790e283b9d72f8a4af85258e1a4146ffc55e0f666b4b"} Jan 22 16:28:28 crc kubenswrapper[5032]: I0122 16:28:28.149284 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d558885bc-927dv" podStartSLOduration=3.149261585 podStartE2EDuration="3.149261585s" podCreationTimestamp="2026-01-22 16:28:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:28:28.137156812 +0000 UTC m=+1411.949325883" watchObservedRunningTime="2026-01-22 16:28:28.149261585 +0000 UTC m=+1411.961430616" Jan 22 16:28:29 crc kubenswrapper[5032]: I0122 16:28:29.131942 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:31 crc kubenswrapper[5032]: I0122 16:28:31.624296 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:31 crc 
kubenswrapper[5032]: I0122 16:28:31.713662 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:31 crc kubenswrapper[5032]: I0122 16:28:31.883146 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.184886 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gcpgh" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="registry-server" containerID="cri-o://71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4" gracePeriod=2 Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.778637 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.952792 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content\") pod \"33f75102-4d30-4a3d-99b9-0d2b930fe494\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.953411 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwhrx\" (UniqueName: \"kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx\") pod \"33f75102-4d30-4a3d-99b9-0d2b930fe494\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.953500 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities\") pod \"33f75102-4d30-4a3d-99b9-0d2b930fe494\" (UID: \"33f75102-4d30-4a3d-99b9-0d2b930fe494\") " Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.955582 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities" (OuterVolumeSpecName: "utilities") pod "33f75102-4d30-4a3d-99b9-0d2b930fe494" (UID: "33f75102-4d30-4a3d-99b9-0d2b930fe494"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:33 crc kubenswrapper[5032]: I0122 16:28:33.960627 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx" (OuterVolumeSpecName: "kube-api-access-qwhrx") pod "33f75102-4d30-4a3d-99b9-0d2b930fe494" (UID: "33f75102-4d30-4a3d-99b9-0d2b930fe494"). InnerVolumeSpecName "kube-api-access-qwhrx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.056910 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.057361 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwhrx\" (UniqueName: \"kubernetes.io/projected/33f75102-4d30-4a3d-99b9-0d2b930fe494-kube-api-access-qwhrx\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.079342 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "33f75102-4d30-4a3d-99b9-0d2b930fe494" (UID: "33f75102-4d30-4a3d-99b9-0d2b930fe494"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.160081 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33f75102-4d30-4a3d-99b9-0d2b930fe494-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.200498 5032 generic.go:334] "Generic (PLEG): container finished" podID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerID="71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4" exitCode=0 Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.200549 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerDied","Data":"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4"} Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.200588 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gcpgh" event={"ID":"33f75102-4d30-4a3d-99b9-0d2b930fe494","Type":"ContainerDied","Data":"855bab15a966c18e540e151fe97c1a3e9ceccccb3bf0c016fdd83d47dacfff55"} Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.200609 5032 scope.go:117] "RemoveContainer" containerID="71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.200638 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gcpgh" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.237073 5032 scope.go:117] "RemoveContainer" containerID="a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.258256 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.270985 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gcpgh"] Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.277009 5032 scope.go:117] "RemoveContainer" containerID="142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.313349 5032 scope.go:117] "RemoveContainer" containerID="71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4" Jan 22 16:28:34 crc kubenswrapper[5032]: E0122 16:28:34.313842 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4\": container with ID starting with 71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4 not found: ID does not exist" containerID="71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.313932 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4"} err="failed to get container status \"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4\": rpc error: code = NotFound desc = could not find container \"71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4\": container with ID starting with 71c07b3d02abddb87a08ac0deaaf98750eea40415c37bfef6cb10e1ff0706ae4 not found: ID does not exist" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.313960 5032 scope.go:117] "RemoveContainer" containerID="a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136" Jan 22 16:28:34 crc kubenswrapper[5032]: E0122 16:28:34.314430 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136\": container with ID starting with a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136 not found: ID does not exist" containerID="a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.314481 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136"} err="failed to get container status \"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136\": rpc error: code = NotFound desc = could not find container \"a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136\": container with ID starting with a30950038a1891c2c751b0eaee38219bc7a41661028a71031fdeb6565b7a2136 not found: ID does not exist" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.314515 5032 scope.go:117] "RemoveContainer" containerID="142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4" Jan 22 16:28:34 crc kubenswrapper[5032]: E0122 16:28:34.314957 5032 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4\": container with ID starting with 142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4 not found: ID does not exist" containerID="142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.315004 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4"} err="failed to get container status \"142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4\": rpc error: code = NotFound desc = could not find container \"142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4\": container with ID starting with 142cb131977e71a26e00cf730c43092b6b6a0941667e2240408a315c924c41c4 not found: ID does not exist" Jan 22 16:28:34 crc kubenswrapper[5032]: I0122 16:28:34.466944 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" path="/var/lib/kubelet/pods/33f75102-4d30-4a3d-99b9-0d2b930fe494/volumes" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.510137 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.583196 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.583493 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="dnsmasq-dns" containerID="cri-o://c4b16a7ac5e338cb6003d559f293b70da8617aaa35e4225a9f7531fe20a90e08" gracePeriod=10 Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.739762 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-9d2fh"] Jan 22 16:28:35 crc kubenswrapper[5032]: E0122 16:28:35.740583 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="extract-utilities" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.740628 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="extract-utilities" Jan 22 16:28:35 crc kubenswrapper[5032]: E0122 16:28:35.740652 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="registry-server" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.740661 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="registry-server" Jan 22 16:28:35 crc kubenswrapper[5032]: E0122 16:28:35.740675 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="extract-content" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.740684 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="extract-content" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.741001 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="33f75102-4d30-4a3d-99b9-0d2b930fe494" containerName="registry-server" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.742655 5032 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.752512 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-9d2fh"] Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795622 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-config\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795740 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vswpc\" (UniqueName: \"kubernetes.io/projected/d59138da-36dc-47f0-81c0-cfc89b9a7203-kube-api-access-vswpc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795784 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795802 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795824 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.795927 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.796007 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.897927 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vswpc\" (UniqueName: \"kubernetes.io/projected/d59138da-36dc-47f0-81c0-cfc89b9a7203-kube-api-access-vswpc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 
16:28:35.898015 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.898042 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.898069 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.898113 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.898178 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.898232 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-config\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.899237 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.899265 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.899272 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-config\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.899750 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" 
(UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.900062 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.900363 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d59138da-36dc-47f0-81c0-cfc89b9a7203-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:35 crc kubenswrapper[5032]: I0122 16:28:35.927740 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vswpc\" (UniqueName: \"kubernetes.io/projected/d59138da-36dc-47f0-81c0-cfc89b9a7203-kube-api-access-vswpc\") pod \"dnsmasq-dns-78c64bc9c5-9d2fh\" (UID: \"d59138da-36dc-47f0-81c0-cfc89b9a7203\") " pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.070615 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.223447 5032 generic.go:334] "Generic (PLEG): container finished" podID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerID="c4b16a7ac5e338cb6003d559f293b70da8617aaa35e4225a9f7531fe20a90e08" exitCode=0 Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.223499 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" event={"ID":"f1f2adcd-427c-4135-a0a7-35e79338861d","Type":"ContainerDied","Data":"c4b16a7ac5e338cb6003d559f293b70da8617aaa35e4225a9f7531fe20a90e08"} Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.566993 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-9d2fh"] Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.570086 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:28:36 crc kubenswrapper[5032]: W0122 16:28:36.593723 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd59138da_36dc_47f0_81c0_cfc89b9a7203.slice/crio-31b7252f750fceb94916cb8c1c7a77c54ba8ad6b08ef9c1f6472c89e0fe3328b WatchSource:0}: Error finding container 31b7252f750fceb94916cb8c1c7a77c54ba8ad6b08ef9c1f6472c89e0fe3328b: Status 404 returned error can't find the container with id 31b7252f750fceb94916cb8c1c7a77c54ba8ad6b08ef9c1f6472c89e0fe3328b Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611344 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mzbz\" (UniqueName: \"kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611445 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611514 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611585 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611709 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.611759 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.615267 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz" (OuterVolumeSpecName: "kube-api-access-9mzbz") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). InnerVolumeSpecName "kube-api-access-9mzbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.665467 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.680315 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.682481 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:36 crc kubenswrapper[5032]: E0122 16:28:36.685978 5032 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb podName:f1f2adcd-427c-4135-a0a7-35e79338861d nodeName:}" failed. No retries permitted until 2026-01-22 16:28:37.185955979 +0000 UTC m=+1420.998125010 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ovsdbserver-nb" (UniqueName: "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d") : error deleting /var/lib/kubelet/pods/f1f2adcd-427c-4135-a0a7-35e79338861d/volume-subpaths: remove /var/lib/kubelet/pods/f1f2adcd-427c-4135-a0a7-35e79338861d/volume-subpaths: no such file or directory Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.686292 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config" (OuterVolumeSpecName: "config") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.715014 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.715336 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.715346 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.715355 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:36 crc kubenswrapper[5032]: I0122 16:28:36.715364 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mzbz\" (UniqueName: \"kubernetes.io/projected/f1f2adcd-427c-4135-a0a7-35e79338861d-kube-api-access-9mzbz\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.228960 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") pod \"f1f2adcd-427c-4135-a0a7-35e79338861d\" (UID: \"f1f2adcd-427c-4135-a0a7-35e79338861d\") " Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.230895 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f1f2adcd-427c-4135-a0a7-35e79338861d" (UID: "f1f2adcd-427c-4135-a0a7-35e79338861d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.234050 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" event={"ID":"f1f2adcd-427c-4135-a0a7-35e79338861d","Type":"ContainerDied","Data":"136c83f3dddac64fc9f88602f1591b934bd1f6bc923e89f969757cfee706e63b"} Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.234096 5032 scope.go:117] "RemoveContainer" containerID="c4b16a7ac5e338cb6003d559f293b70da8617aaa35e4225a9f7531fe20a90e08" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.234092 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-cbh77" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.236158 5032 generic.go:334] "Generic (PLEG): container finished" podID="d59138da-36dc-47f0-81c0-cfc89b9a7203" containerID="e1fca006130410505d357c4d8894867c823e32357230bda19c329b171c132b33" exitCode=0 Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.236203 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" event={"ID":"d59138da-36dc-47f0-81c0-cfc89b9a7203","Type":"ContainerDied","Data":"e1fca006130410505d357c4d8894867c823e32357230bda19c329b171c132b33"} Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.236231 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" event={"ID":"d59138da-36dc-47f0-81c0-cfc89b9a7203","Type":"ContainerStarted","Data":"31b7252f750fceb94916cb8c1c7a77c54ba8ad6b08ef9c1f6472c89e0fe3328b"} Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.272912 5032 scope.go:117] "RemoveContainer" containerID="99170e72eecb54f889a817187f985008214abb520678f64a17cee4ac5ea849fc" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.334595 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1f2adcd-427c-4135-a0a7-35e79338861d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.518014 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:28:37 crc kubenswrapper[5032]: I0122 16:28:37.530714 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-cbh77"] Jan 22 16:28:38 crc kubenswrapper[5032]: I0122 16:28:38.247489 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" event={"ID":"d59138da-36dc-47f0-81c0-cfc89b9a7203","Type":"ContainerStarted","Data":"070df0fb6356ed39cc460af47a6783902220056f05accab97b014505e4a35d48"} Jan 22 16:28:38 crc kubenswrapper[5032]: I0122 16:28:38.247845 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:38 crc kubenswrapper[5032]: I0122 16:28:38.279686 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" podStartSLOduration=3.279667195 podStartE2EDuration="3.279667195s" podCreationTimestamp="2026-01-22 16:28:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:28:38.275511944 +0000 UTC m=+1422.087680995" watchObservedRunningTime="2026-01-22 16:28:38.279667195 +0000 UTC m=+1422.091836226" Jan 22 16:28:38 crc kubenswrapper[5032]: I0122 16:28:38.472332 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" path="/var/lib/kubelet/pods/f1f2adcd-427c-4135-a0a7-35e79338861d/volumes" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.073139 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-9d2fh" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.170617 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.171244 5032 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-d558885bc-927dv" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="dnsmasq-dns" containerID="cri-o://3c5fc3133973a633ee14790e283b9d72f8a4af85258e1a4146ffc55e0f666b4b" gracePeriod=10 Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.330576 5032 generic.go:334] "Generic (PLEG): container finished" podID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerID="3c5fc3133973a633ee14790e283b9d72f8a4af85258e1a4146ffc55e0f666b4b" exitCode=0 Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.330614 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-927dv" event={"ID":"f050dac2-3682-4f31-ae31-05c50f3fb957","Type":"ContainerDied","Data":"3c5fc3133973a633ee14790e283b9d72f8a4af85258e1a4146ffc55e0f666b4b"} Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.688919 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.724819 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.724906 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.725097 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.725220 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5fkd\" (UniqueName: \"kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.725355 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.726201 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.726386 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc\") pod \"f050dac2-3682-4f31-ae31-05c50f3fb957\" (UID: \"f050dac2-3682-4f31-ae31-05c50f3fb957\") " Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.741266 5032 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd" (OuterVolumeSpecName: "kube-api-access-r5fkd") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "kube-api-access-r5fkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.773538 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.789132 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.791491 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.792800 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.814043 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.829661 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config" (OuterVolumeSpecName: "config") pod "f050dac2-3682-4f31-ae31-05c50f3fb957" (UID: "f050dac2-3682-4f31-ae31-05c50f3fb957"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839511 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839544 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5fkd\" (UniqueName: \"kubernetes.io/projected/f050dac2-3682-4f31-ae31-05c50f3fb957-kube-api-access-r5fkd\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839558 5032 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-config\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839567 5032 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839574 5032 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839582 5032 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:46 crc kubenswrapper[5032]: I0122 16:28:46.839590 5032 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f050dac2-3682-4f31-ae31-05c50f3fb957-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.342885 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-927dv" event={"ID":"f050dac2-3682-4f31-ae31-05c50f3fb957","Type":"ContainerDied","Data":"2852ffbeac2515579baa12c9dc4d866845e23f61dbc1ce9bb5479c2201364b83"} Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.343071 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-927dv" Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.343080 5032 scope.go:117] "RemoveContainer" containerID="3c5fc3133973a633ee14790e283b9d72f8a4af85258e1a4146ffc55e0f666b4b" Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.384257 5032 scope.go:117] "RemoveContainer" containerID="8e199d84d3c3f741a9ced87ced398ef7caff8d36210e5207960e4d69783f4e85" Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.397900 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:47 crc kubenswrapper[5032]: I0122 16:28:47.405337 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-927dv"] Jan 22 16:28:48 crc kubenswrapper[5032]: I0122 16:28:48.467044 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" path="/var/lib/kubelet/pods/f050dac2-3682-4f31-ae31-05c50f3fb957/volumes" Jan 22 16:28:56 crc kubenswrapper[5032]: I0122 16:28:56.453531 5032 generic.go:334] "Generic (PLEG): container finished" podID="d91220a8-ad5e-4caf-872f-b4f7e059ae5a" containerID="5e35b6891d0aedb2e1e1917a411d86f35bc7db98b0da5efd11775c04ae734e7c" exitCode=0 Jan 22 16:28:56 crc kubenswrapper[5032]: I0122 16:28:56.471085 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d91220a8-ad5e-4caf-872f-b4f7e059ae5a","Type":"ContainerDied","Data":"5e35b6891d0aedb2e1e1917a411d86f35bc7db98b0da5efd11775c04ae734e7c"} Jan 22 16:28:57 crc kubenswrapper[5032]: I0122 16:28:57.466526 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d91220a8-ad5e-4caf-872f-b4f7e059ae5a","Type":"ContainerStarted","Data":"3111dbe8583546e174e0cce1fec8d73d800c8a3396cb8eea3b3829896a03755a"} Jan 22 16:28:57 crc kubenswrapper[5032]: I0122 16:28:57.467100 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 16:28:57 crc kubenswrapper[5032]: I0122 16:28:57.517372 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.517349677 podStartE2EDuration="36.517349677s" podCreationTimestamp="2026-01-22 16:28:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:28:57.512706013 +0000 UTC m=+1441.324875054" watchObservedRunningTime="2026-01-22 16:28:57.517349677 +0000 UTC m=+1441.329518718" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.274926 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf"] Jan 22 16:28:59 crc kubenswrapper[5032]: E0122 16:28:59.275339 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="init" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275357 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="init" Jan 22 16:28:59 crc kubenswrapper[5032]: E0122 16:28:59.275375 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="dnsmasq-dns" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275381 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="dnsmasq-dns" Jan 22 
16:28:59 crc kubenswrapper[5032]: E0122 16:28:59.275398 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="init" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275405 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="init" Jan 22 16:28:59 crc kubenswrapper[5032]: E0122 16:28:59.275438 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="dnsmasq-dns" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275443 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="dnsmasq-dns" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275622 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f2adcd-427c-4135-a0a7-35e79338861d" containerName="dnsmasq-dns" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.275635 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f050dac2-3682-4f31-ae31-05c50f3fb957" containerName="dnsmasq-dns" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.276266 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.278346 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.278958 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.279165 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.279291 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.288872 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf"] Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.324530 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.324609 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84crt\" (UniqueName: \"kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.324633 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.324650 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.426393 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84crt\" (UniqueName: \"kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.426795 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.426828 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.428288 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.433643 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.434153 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.436203 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.444076 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84crt\" (UniqueName: \"kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.484233 5032 generic.go:334] "Generic (PLEG): container finished" podID="9c0805a3-4319-469b-8a2a-e53998fd9166" containerID="3ee129f53efba4e7615ec8a15990d0e4a9e56607561f9ab146a29a92f7552f2a" exitCode=0 Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.484402 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c0805a3-4319-469b-8a2a-e53998fd9166","Type":"ContainerDied","Data":"3ee129f53efba4e7615ec8a15990d0e4a9e56607561f9ab146a29a92f7552f2a"} Jan 22 16:28:59 crc kubenswrapper[5032]: I0122 16:28:59.592986 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:29:00 crc kubenswrapper[5032]: W0122 16:29:00.182948 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd559e797_49a2_4f7a_827c_d3d2c48563c9.slice/crio-9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335 WatchSource:0}: Error finding container 9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335: Status 404 returned error can't find the container with id 9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335 Jan 22 16:29:00 crc kubenswrapper[5032]: I0122 16:29:00.185848 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf"] Jan 22 16:29:00 crc kubenswrapper[5032]: I0122 16:29:00.495378 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9c0805a3-4319-469b-8a2a-e53998fd9166","Type":"ContainerStarted","Data":"ada1a8c4bf63570390672aafa87a2209d5a8e91d792ea605c284c01b2812d71b"} Jan 22 16:29:00 crc kubenswrapper[5032]: I0122 16:29:00.496209 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:29:00 crc kubenswrapper[5032]: I0122 16:29:00.497577 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" event={"ID":"d559e797-49a2-4f7a-827c-d3d2c48563c9","Type":"ContainerStarted","Data":"9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335"} Jan 22 16:29:00 crc kubenswrapper[5032]: I0122 16:29:00.520624 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.520609071 podStartE2EDuration="36.520609071s" podCreationTimestamp="2026-01-22 16:28:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:29:00.516908472 +0000 UTC m=+1444.329077493" watchObservedRunningTime="2026-01-22 
16:29:00.520609071 +0000 UTC m=+1444.332778102" Jan 22 16:29:11 crc kubenswrapper[5032]: I0122 16:29:11.508240 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 16:29:12 crc kubenswrapper[5032]: I0122 16:29:12.630435 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" event={"ID":"d559e797-49a2-4f7a-827c-d3d2c48563c9","Type":"ContainerStarted","Data":"74d36286393531edd3d58dc834747b00738273a0cd011eed11ca9439b40ac984"} Jan 22 16:29:12 crc kubenswrapper[5032]: I0122 16:29:12.656279 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" podStartSLOduration=2.015131821 podStartE2EDuration="13.656261155s" podCreationTimestamp="2026-01-22 16:28:59 +0000 UTC" firstStartedPulling="2026-01-22 16:29:00.186423614 +0000 UTC m=+1443.998592645" lastFinishedPulling="2026-01-22 16:29:11.827552948 +0000 UTC m=+1455.639721979" observedRunningTime="2026-01-22 16:29:12.651044606 +0000 UTC m=+1456.463213677" watchObservedRunningTime="2026-01-22 16:29:12.656261155 +0000 UTC m=+1456.468430186" Jan 22 16:29:14 crc kubenswrapper[5032]: I0122 16:29:14.754940 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 16:29:16 crc kubenswrapper[5032]: I0122 16:29:16.724720 5032 scope.go:117] "RemoveContainer" containerID="760773ef52722ec7e122618fd34ccd81d4622c147410b76d3aa1bf3524eaebb0" Jan 22 16:29:16 crc kubenswrapper[5032]: I0122 16:29:16.773417 5032 scope.go:117] "RemoveContainer" containerID="e985e3d1e26a7732fcb864c735e48a4fea64c973ee37f49d9a6688ef675375d3" Jan 22 16:29:24 crc kubenswrapper[5032]: I0122 16:29:24.765381 5032 generic.go:334] "Generic (PLEG): container finished" podID="d559e797-49a2-4f7a-827c-d3d2c48563c9" containerID="74d36286393531edd3d58dc834747b00738273a0cd011eed11ca9439b40ac984" exitCode=0 Jan 22 16:29:24 crc kubenswrapper[5032]: I0122 16:29:24.765423 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" event={"ID":"d559e797-49a2-4f7a-827c-d3d2c48563c9","Type":"ContainerDied","Data":"74d36286393531edd3d58dc834747b00738273a0cd011eed11ca9439b40ac984"} Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.226322 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.266112 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84crt\" (UniqueName: \"kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt\") pod \"d559e797-49a2-4f7a-827c-d3d2c48563c9\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.266596 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam\") pod \"d559e797-49a2-4f7a-827c-d3d2c48563c9\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.266698 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle\") pod \"d559e797-49a2-4f7a-827c-d3d2c48563c9\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.266745 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory\") pod \"d559e797-49a2-4f7a-827c-d3d2c48563c9\" (UID: \"d559e797-49a2-4f7a-827c-d3d2c48563c9\") " Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.277145 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt" (OuterVolumeSpecName: "kube-api-access-84crt") pod "d559e797-49a2-4f7a-827c-d3d2c48563c9" (UID: "d559e797-49a2-4f7a-827c-d3d2c48563c9"). InnerVolumeSpecName "kube-api-access-84crt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.277243 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "d559e797-49a2-4f7a-827c-d3d2c48563c9" (UID: "d559e797-49a2-4f7a-827c-d3d2c48563c9"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.307411 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory" (OuterVolumeSpecName: "inventory") pod "d559e797-49a2-4f7a-827c-d3d2c48563c9" (UID: "d559e797-49a2-4f7a-827c-d3d2c48563c9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.320579 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d559e797-49a2-4f7a-827c-d3d2c48563c9" (UID: "d559e797-49a2-4f7a-827c-d3d2c48563c9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.368499 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.368938 5032 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.368953 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d559e797-49a2-4f7a-827c-d3d2c48563c9-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.368966 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84crt\" (UniqueName: \"kubernetes.io/projected/d559e797-49a2-4f7a-827c-d3d2c48563c9-kube-api-access-84crt\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.794843 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" event={"ID":"d559e797-49a2-4f7a-827c-d3d2c48563c9","Type":"ContainerDied","Data":"9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335"} Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.794928 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b3b1ceb3f7f3d6d442d00ebbf729dfb98be584dee64b1b25cc132f29f7b8335" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.794947 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.881850 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w"] Jan 22 16:29:26 crc kubenswrapper[5032]: E0122 16:29:26.882342 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d559e797-49a2-4f7a-827c-d3d2c48563c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.882381 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d559e797-49a2-4f7a-827c-d3d2c48563c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.882647 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d559e797-49a2-4f7a-827c-d3d2c48563c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.883469 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.885961 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.886571 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.887099 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.887943 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.909625 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w"] Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.980075 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.980151 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:26 crc kubenswrapper[5032]: I0122 16:29:26.980436 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wgxz\" (UniqueName: \"kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.081501 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wgxz\" (UniqueName: \"kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.081564 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.081597 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.091469 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.091733 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.100424 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wgxz\" (UniqueName: \"kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-22g2w\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:27 crc kubenswrapper[5032]: I0122 16:29:27.209974 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:28 crc kubenswrapper[5032]: W0122 16:29:27.792980 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod564e7d8f_ca5b_46fe_9894_e27f6e2b0385.slice/crio-5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4 WatchSource:0}: Error finding container 5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4: Status 404 returned error can't find the container with id 5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4 Jan 22 16:29:28 crc kubenswrapper[5032]: I0122 16:29:27.799820 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w"] Jan 22 16:29:28 crc kubenswrapper[5032]: I0122 16:29:27.807288 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" event={"ID":"564e7d8f-ca5b-46fe-9894-e27f6e2b0385","Type":"ContainerStarted","Data":"5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4"} Jan 22 16:29:28 crc kubenswrapper[5032]: I0122 16:29:28.821048 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" event={"ID":"564e7d8f-ca5b-46fe-9894-e27f6e2b0385","Type":"ContainerStarted","Data":"50d14c6a7ef0a5b7ca6690adbf40deffd6c6190b5a95e24613083f3aa40775ae"} Jan 22 16:29:28 crc kubenswrapper[5032]: I0122 16:29:28.853474 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" podStartSLOduration=2.358282939 podStartE2EDuration="2.853441746s" podCreationTimestamp="2026-01-22 16:29:26 +0000 UTC" firstStartedPulling="2026-01-22 16:29:27.797947695 +0000 UTC m=+1471.610116766" lastFinishedPulling="2026-01-22 16:29:28.293106542 +0000 UTC m=+1472.105275573" 
observedRunningTime="2026-01-22 16:29:28.839941167 +0000 UTC m=+1472.652110268" watchObservedRunningTime="2026-01-22 16:29:28.853441746 +0000 UTC m=+1472.665610827" Jan 22 16:29:31 crc kubenswrapper[5032]: I0122 16:29:31.854930 5032 generic.go:334] "Generic (PLEG): container finished" podID="564e7d8f-ca5b-46fe-9894-e27f6e2b0385" containerID="50d14c6a7ef0a5b7ca6690adbf40deffd6c6190b5a95e24613083f3aa40775ae" exitCode=0 Jan 22 16:29:31 crc kubenswrapper[5032]: I0122 16:29:31.855020 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" event={"ID":"564e7d8f-ca5b-46fe-9894-e27f6e2b0385","Type":"ContainerDied","Data":"50d14c6a7ef0a5b7ca6690adbf40deffd6c6190b5a95e24613083f3aa40775ae"} Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.442125 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.530706 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wgxz\" (UniqueName: \"kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz\") pod \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.530963 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam\") pod \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.531002 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory\") pod \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\" (UID: \"564e7d8f-ca5b-46fe-9894-e27f6e2b0385\") " Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.538745 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz" (OuterVolumeSpecName: "kube-api-access-9wgxz") pod "564e7d8f-ca5b-46fe-9894-e27f6e2b0385" (UID: "564e7d8f-ca5b-46fe-9894-e27f6e2b0385"). InnerVolumeSpecName "kube-api-access-9wgxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.578055 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory" (OuterVolumeSpecName: "inventory") pod "564e7d8f-ca5b-46fe-9894-e27f6e2b0385" (UID: "564e7d8f-ca5b-46fe-9894-e27f6e2b0385"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.578835 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "564e7d8f-ca5b-46fe-9894-e27f6e2b0385" (UID: "564e7d8f-ca5b-46fe-9894-e27f6e2b0385"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.634388 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.634442 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.634463 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wgxz\" (UniqueName: \"kubernetes.io/projected/564e7d8f-ca5b-46fe-9894-e27f6e2b0385-kube-api-access-9wgxz\") on node \"crc\" DevicePath \"\"" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.879653 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" event={"ID":"564e7d8f-ca5b-46fe-9894-e27f6e2b0385","Type":"ContainerDied","Data":"5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4"} Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.879699 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c4d31398bd7e126acd7eb734f62eace4620b3fd7dc86a5507f7ea9a74e567e4" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.879707 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-22g2w" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.975413 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl"] Jan 22 16:29:33 crc kubenswrapper[5032]: E0122 16:29:33.976249 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="564e7d8f-ca5b-46fe-9894-e27f6e2b0385" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.976280 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="564e7d8f-ca5b-46fe-9894-e27f6e2b0385" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.976645 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="564e7d8f-ca5b-46fe-9894-e27f6e2b0385" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.977648 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.981961 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.982056 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.982246 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.982410 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:29:33 crc kubenswrapper[5032]: I0122 16:29:33.994827 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl"] Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.048409 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.048661 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.048827 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.048971 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsnfz\" (UniqueName: \"kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.149885 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.149957 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.150028 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.150070 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsnfz\" (UniqueName: \"kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.155695 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.157217 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.165425 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.180447 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsnfz\" (UniqueName: \"kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.309978 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:29:34 crc kubenswrapper[5032]: I0122 16:29:34.940553 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl"] Jan 22 16:29:34 crc kubenswrapper[5032]: W0122 16:29:34.953028 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod547c061b_8a80_410b_b177_46281b19211c.slice/crio-9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b WatchSource:0}: Error finding container 9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b: Status 404 returned error can't find the container with id 9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b Jan 22 16:29:35 crc kubenswrapper[5032]: I0122 16:29:35.908727 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" event={"ID":"547c061b-8a80-410b-b177-46281b19211c","Type":"ContainerStarted","Data":"86544a74ae28ff8e5ac35637e6b043c2dcecd86f21199f6ccddc46300b02a7ed"} Jan 22 16:29:35 crc kubenswrapper[5032]: I0122 16:29:35.909195 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" event={"ID":"547c061b-8a80-410b-b177-46281b19211c","Type":"ContainerStarted","Data":"9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b"} Jan 22 16:29:35 crc kubenswrapper[5032]: I0122 16:29:35.936484 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" podStartSLOduration=2.266386247 podStartE2EDuration="2.936458136s" podCreationTimestamp="2026-01-22 16:29:33 +0000 UTC" firstStartedPulling="2026-01-22 16:29:34.955244484 +0000 UTC m=+1478.767413525" lastFinishedPulling="2026-01-22 16:29:35.625316343 +0000 UTC m=+1479.437485414" observedRunningTime="2026-01-22 16:29:35.93099198 +0000 UTC m=+1479.743161041" watchObservedRunningTime="2026-01-22 16:29:35.936458136 +0000 UTC m=+1479.748627177" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.144002 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r"] Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.151452 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.154081 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.154885 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r"] Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.155561 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.241171 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.241548 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.241637 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvv92\" (UniqueName: \"kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.343327 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.343431 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.343548 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvv92\" (UniqueName: \"kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.344666 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume\") pod 
\"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.360351 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.366877 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvv92\" (UniqueName: \"kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92\") pod \"collect-profiles-29484990-rhb5r\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.481129 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:00 crc kubenswrapper[5032]: I0122 16:30:00.942191 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r"] Jan 22 16:30:00 crc kubenswrapper[5032]: W0122 16:30:00.948507 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf62a5ce0_c3e7_4db0_b367_0ac223d92c72.slice/crio-d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c WatchSource:0}: Error finding container d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c: Status 404 returned error can't find the container with id d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c Jan 22 16:30:01 crc kubenswrapper[5032]: I0122 16:30:01.208837 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" event={"ID":"f62a5ce0-c3e7-4db0-b367-0ac223d92c72","Type":"ContainerStarted","Data":"a623d89ad21f181856f7035dfc391e6de011d3ed6f667a5ae0c29e47dac0477f"} Jan 22 16:30:01 crc kubenswrapper[5032]: I0122 16:30:01.209395 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" event={"ID":"f62a5ce0-c3e7-4db0-b367-0ac223d92c72","Type":"ContainerStarted","Data":"d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c"} Jan 22 16:30:01 crc kubenswrapper[5032]: I0122 16:30:01.243020 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" podStartSLOduration=1.243000733 podStartE2EDuration="1.243000733s" podCreationTimestamp="2026-01-22 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:30:01.22901563 +0000 UTC m=+1505.041184661" watchObservedRunningTime="2026-01-22 16:30:01.243000733 +0000 UTC m=+1505.055169774" Jan 22 16:30:02 crc kubenswrapper[5032]: I0122 16:30:02.226023 5032 generic.go:334] "Generic (PLEG): container finished" podID="f62a5ce0-c3e7-4db0-b367-0ac223d92c72" containerID="a623d89ad21f181856f7035dfc391e6de011d3ed6f667a5ae0c29e47dac0477f" exitCode=0 Jan 22 16:30:02 crc kubenswrapper[5032]: I0122 16:30:02.226212 
5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" event={"ID":"f62a5ce0-c3e7-4db0-b367-0ac223d92c72","Type":"ContainerDied","Data":"a623d89ad21f181856f7035dfc391e6de011d3ed6f667a5ae0c29e47dac0477f"} Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.016425 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.016502 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.685010 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.817361 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvv92\" (UniqueName: \"kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92\") pod \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.817582 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume\") pod \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.817704 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume\") pod \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\" (UID: \"f62a5ce0-c3e7-4db0-b367-0ac223d92c72\") " Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.818394 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume" (OuterVolumeSpecName: "config-volume") pod "f62a5ce0-c3e7-4db0-b367-0ac223d92c72" (UID: "f62a5ce0-c3e7-4db0-b367-0ac223d92c72"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.823819 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f62a5ce0-c3e7-4db0-b367-0ac223d92c72" (UID: "f62a5ce0-c3e7-4db0-b367-0ac223d92c72"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.823845 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92" (OuterVolumeSpecName: "kube-api-access-dvv92") pod "f62a5ce0-c3e7-4db0-b367-0ac223d92c72" (UID: "f62a5ce0-c3e7-4db0-b367-0ac223d92c72"). 
InnerVolumeSpecName "kube-api-access-dvv92". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.920302 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.920338 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:03 crc kubenswrapper[5032]: I0122 16:30:03.920353 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvv92\" (UniqueName: \"kubernetes.io/projected/f62a5ce0-c3e7-4db0-b367-0ac223d92c72-kube-api-access-dvv92\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:04 crc kubenswrapper[5032]: I0122 16:30:04.248375 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" event={"ID":"f62a5ce0-c3e7-4db0-b367-0ac223d92c72","Type":"ContainerDied","Data":"d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c"} Jan 22 16:30:04 crc kubenswrapper[5032]: I0122 16:30:04.248419 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2d62910e543836162b4abd1552d925fd4ea95e61d83a6cf6608a64ce4da650c" Jan 22 16:30:04 crc kubenswrapper[5032]: I0122 16:30:04.248438 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r" Jan 22 16:30:16 crc kubenswrapper[5032]: I0122 16:30:16.952368 5032 scope.go:117] "RemoveContainer" containerID="174bfa085d80eeecb78b6cadbd42931be570bb7c8eb00a34d30cabd4b1a658a9" Jan 22 16:30:25 crc kubenswrapper[5032]: I0122 16:30:25.964786 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:25 crc kubenswrapper[5032]: E0122 16:30:25.966484 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f62a5ce0-c3e7-4db0-b367-0ac223d92c72" containerName="collect-profiles" Jan 22 16:30:25 crc kubenswrapper[5032]: I0122 16:30:25.966522 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f62a5ce0-c3e7-4db0-b367-0ac223d92c72" containerName="collect-profiles" Jan 22 16:30:25 crc kubenswrapper[5032]: I0122 16:30:25.967126 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f62a5ce0-c3e7-4db0-b367-0ac223d92c72" containerName="collect-profiles" Jan 22 16:30:25 crc kubenswrapper[5032]: I0122 16:30:25.970393 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:25 crc kubenswrapper[5032]: I0122 16:30:25.974405 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.074168 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.074240 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.074395 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z852j\" (UniqueName: \"kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.176177 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.176307 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z852j\" (UniqueName: \"kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.176355 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.176745 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.176973 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.203989 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z852j\" (UniqueName: \"kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j\") pod \"community-operators-gdlt6\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.310229 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:26 crc kubenswrapper[5032]: I0122 16:30:26.835110 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:26 crc kubenswrapper[5032]: W0122 16:30:26.843017 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58a29045_233e_4b3d_a7d4_acbfcce2dc7a.slice/crio-5dc27b77ddbf6a9d1928437165d9e6a8efd0b291133009a07ef0671126158f4f WatchSource:0}: Error finding container 5dc27b77ddbf6a9d1928437165d9e6a8efd0b291133009a07ef0671126158f4f: Status 404 returned error can't find the container with id 5dc27b77ddbf6a9d1928437165d9e6a8efd0b291133009a07ef0671126158f4f Jan 22 16:30:27 crc kubenswrapper[5032]: I0122 16:30:27.525975 5032 generic.go:334] "Generic (PLEG): container finished" podID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerID="15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8" exitCode=0 Jan 22 16:30:27 crc kubenswrapper[5032]: I0122 16:30:27.526160 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerDied","Data":"15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8"} Jan 22 16:30:27 crc kubenswrapper[5032]: I0122 16:30:27.526427 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerStarted","Data":"5dc27b77ddbf6a9d1928437165d9e6a8efd0b291133009a07ef0671126158f4f"} Jan 22 16:30:28 crc kubenswrapper[5032]: I0122 16:30:28.536772 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerStarted","Data":"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb"} Jan 22 16:30:29 crc kubenswrapper[5032]: I0122 16:30:29.552633 5032 generic.go:334] "Generic (PLEG): container finished" podID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerID="061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb" exitCode=0 Jan 22 16:30:29 crc kubenswrapper[5032]: I0122 16:30:29.552697 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerDied","Data":"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb"} Jan 22 16:30:31 crc kubenswrapper[5032]: I0122 16:30:31.581122 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerStarted","Data":"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61"} Jan 22 16:30:31 crc kubenswrapper[5032]: I0122 16:30:31.622262 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gdlt6" 
podStartSLOduration=3.51741882 podStartE2EDuration="6.622237339s" podCreationTimestamp="2026-01-22 16:30:25 +0000 UTC" firstStartedPulling="2026-01-22 16:30:27.530971999 +0000 UTC m=+1531.343141040" lastFinishedPulling="2026-01-22 16:30:30.635790488 +0000 UTC m=+1534.447959559" observedRunningTime="2026-01-22 16:30:31.613826425 +0000 UTC m=+1535.425995466" watchObservedRunningTime="2026-01-22 16:30:31.622237339 +0000 UTC m=+1535.434406380" Jan 22 16:30:33 crc kubenswrapper[5032]: I0122 16:30:33.016888 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:30:33 crc kubenswrapper[5032]: I0122 16:30:33.017269 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:30:36 crc kubenswrapper[5032]: I0122 16:30:36.311084 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:36 crc kubenswrapper[5032]: I0122 16:30:36.312513 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:36 crc kubenswrapper[5032]: I0122 16:30:36.377765 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:36 crc kubenswrapper[5032]: I0122 16:30:36.710716 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:36 crc kubenswrapper[5032]: I0122 16:30:36.780339 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:38 crc kubenswrapper[5032]: I0122 16:30:38.659050 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gdlt6" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="registry-server" containerID="cri-o://002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61" gracePeriod=2 Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.041753 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.048619 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.056134 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.177097 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.177145 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqx5q\" (UniqueName: \"kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.177861 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.228130 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.279230 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.279302 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqx5q\" (UniqueName: \"kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.279484 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.279973 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.279981 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities\") pod \"certified-operators-7brs8\" 
(UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.312055 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqx5q\" (UniqueName: \"kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q\") pod \"certified-operators-7brs8\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.376462 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.380636 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities\") pod \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.380790 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z852j\" (UniqueName: \"kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j\") pod \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.381470 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities" (OuterVolumeSpecName: "utilities") pod "58a29045-233e-4b3d-a7d4-acbfcce2dc7a" (UID: "58a29045-233e-4b3d-a7d4-acbfcce2dc7a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.381737 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content\") pod \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\" (UID: \"58a29045-233e-4b3d-a7d4-acbfcce2dc7a\") " Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.382333 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.385510 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j" (OuterVolumeSpecName: "kube-api-access-z852j") pod "58a29045-233e-4b3d-a7d4-acbfcce2dc7a" (UID: "58a29045-233e-4b3d-a7d4-acbfcce2dc7a"). InnerVolumeSpecName "kube-api-access-z852j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.463696 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58a29045-233e-4b3d-a7d4-acbfcce2dc7a" (UID: "58a29045-233e-4b3d-a7d4-acbfcce2dc7a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.487713 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.487751 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z852j\" (UniqueName: \"kubernetes.io/projected/58a29045-233e-4b3d-a7d4-acbfcce2dc7a-kube-api-access-z852j\") on node \"crc\" DevicePath \"\"" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.675834 5032 generic.go:334] "Generic (PLEG): container finished" podID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerID="002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61" exitCode=0 Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.675884 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerDied","Data":"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61"} Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.675909 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdlt6" event={"ID":"58a29045-233e-4b3d-a7d4-acbfcce2dc7a","Type":"ContainerDied","Data":"5dc27b77ddbf6a9d1928437165d9e6a8efd0b291133009a07ef0671126158f4f"} Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.675925 5032 scope.go:117] "RemoveContainer" containerID="002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.676069 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gdlt6" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.710695 5032 scope.go:117] "RemoveContainer" containerID="061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.711046 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.719244 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gdlt6"] Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.747576 5032 scope.go:117] "RemoveContainer" containerID="15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.766625 5032 scope.go:117] "RemoveContainer" containerID="002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61" Jan 22 16:30:39 crc kubenswrapper[5032]: E0122 16:30:39.767068 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61\": container with ID starting with 002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61 not found: ID does not exist" containerID="002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.767096 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61"} err="failed to get container status \"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61\": rpc error: code = NotFound desc = could not find container \"002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61\": container with ID starting with 002a9dffed33cc9343815a442b27ebf9975859971a0ff63b20904bd46303df61 not found: ID does not exist" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.767116 5032 scope.go:117] "RemoveContainer" containerID="061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb" Jan 22 16:30:39 crc kubenswrapper[5032]: E0122 16:30:39.768604 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb\": container with ID starting with 061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb not found: ID does not exist" containerID="061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.768624 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb"} err="failed to get container status \"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb\": rpc error: code = NotFound desc = could not find container \"061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb\": container with ID starting with 061cb9c6bd7ea10c28cd9fba365a8579294df8b7d1217921ea4192080594e6eb not found: ID does not exist" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.768638 5032 scope.go:117] "RemoveContainer" containerID="15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8" Jan 22 16:30:39 crc kubenswrapper[5032]: E0122 16:30:39.768949 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8\": container with ID starting with 15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8 not found: ID does not exist" containerID="15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.768991 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8"} err="failed to get container status \"15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8\": rpc error: code = NotFound desc = could not find container \"15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8\": container with ID starting with 15173a69b1d0e249384f4f1315cffb471b59c6f6d0921e47a8fdac16b05e69e8 not found: ID does not exist" Jan 22 16:30:39 crc kubenswrapper[5032]: I0122 16:30:39.834548 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:30:40 crc kubenswrapper[5032]: I0122 16:30:40.466297 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" path="/var/lib/kubelet/pods/58a29045-233e-4b3d-a7d4-acbfcce2dc7a/volumes" Jan 22 16:30:40 crc kubenswrapper[5032]: I0122 16:30:40.692080 5032 generic.go:334] "Generic (PLEG): container finished" podID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerID="f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5" exitCode=0 Jan 22 16:30:40 crc kubenswrapper[5032]: I0122 16:30:40.692224 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerDied","Data":"f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5"} Jan 22 16:30:40 crc kubenswrapper[5032]: I0122 16:30:40.692266 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerStarted","Data":"9e30ded1e8c2b59229c81dc3e0c44e9023b5c30fb6653dc6ff6ae954e7dd97d2"} Jan 22 16:30:44 crc kubenswrapper[5032]: I0122 16:30:44.734990 5032 generic.go:334] "Generic (PLEG): container finished" podID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerID="0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568" exitCode=0 Jan 22 16:30:44 crc kubenswrapper[5032]: I0122 16:30:44.735073 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerDied","Data":"0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568"} Jan 22 16:30:45 crc kubenswrapper[5032]: I0122 16:30:45.750094 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerStarted","Data":"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b"} Jan 22 16:30:45 crc kubenswrapper[5032]: I0122 16:30:45.782210 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7brs8" podStartSLOduration=2.279103267 podStartE2EDuration="6.782190905s" podCreationTimestamp="2026-01-22 16:30:39 +0000 UTC" firstStartedPulling="2026-01-22 16:30:40.695708998 +0000 UTC 
m=+1544.507878029" lastFinishedPulling="2026-01-22 16:30:45.198796636 +0000 UTC m=+1549.010965667" observedRunningTime="2026-01-22 16:30:45.77599372 +0000 UTC m=+1549.588162791" watchObservedRunningTime="2026-01-22 16:30:45.782190905 +0000 UTC m=+1549.594359956" Jan 22 16:30:49 crc kubenswrapper[5032]: I0122 16:30:49.377361 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:49 crc kubenswrapper[5032]: I0122 16:30:49.377981 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:49 crc kubenswrapper[5032]: I0122 16:30:49.451808 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.426425 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.503504 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.558699 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.558995 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5nnrt" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="registry-server" containerID="cri-o://e376001bc52b3cd612659e9d36a330c208382a446394ea4c8f1df4fa7a1d61a9" gracePeriod=2 Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.930727 5032 generic.go:334] "Generic (PLEG): container finished" podID="66a6f462-61b5-4b57-af6f-858294b300eb" containerID="e376001bc52b3cd612659e9d36a330c208382a446394ea4c8f1df4fa7a1d61a9" exitCode=0 Jan 22 16:30:59 crc kubenswrapper[5032]: I0122 16:30:59.931521 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerDied","Data":"e376001bc52b3cd612659e9d36a330c208382a446394ea4c8f1df4fa7a1d61a9"} Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.079260 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.223734 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities\") pod \"66a6f462-61b5-4b57-af6f-858294b300eb\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.223899 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg6s8\" (UniqueName: \"kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8\") pod \"66a6f462-61b5-4b57-af6f-858294b300eb\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.223992 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content\") pod \"66a6f462-61b5-4b57-af6f-858294b300eb\" (UID: \"66a6f462-61b5-4b57-af6f-858294b300eb\") " Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.224530 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities" (OuterVolumeSpecName: "utilities") pod "66a6f462-61b5-4b57-af6f-858294b300eb" (UID: "66a6f462-61b5-4b57-af6f-858294b300eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.230980 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8" (OuterVolumeSpecName: "kube-api-access-kg6s8") pod "66a6f462-61b5-4b57-af6f-858294b300eb" (UID: "66a6f462-61b5-4b57-af6f-858294b300eb"). InnerVolumeSpecName "kube-api-access-kg6s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.287393 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66a6f462-61b5-4b57-af6f-858294b300eb" (UID: "66a6f462-61b5-4b57-af6f-858294b300eb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.325781 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg6s8\" (UniqueName: \"kubernetes.io/projected/66a6f462-61b5-4b57-af6f-858294b300eb-kube-api-access-kg6s8\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.326022 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.326110 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a6f462-61b5-4b57-af6f-858294b300eb-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.941750 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5nnrt" event={"ID":"66a6f462-61b5-4b57-af6f-858294b300eb","Type":"ContainerDied","Data":"772d48f96674b78cdc4118147c557f738a1fbabbc217254b0f1410037aef206b"} Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.941810 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5nnrt" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.941821 5032 scope.go:117] "RemoveContainer" containerID="e376001bc52b3cd612659e9d36a330c208382a446394ea4c8f1df4fa7a1d61a9" Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.968687 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.976749 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5nnrt"] Jan 22 16:31:00 crc kubenswrapper[5032]: I0122 16:31:00.986427 5032 scope.go:117] "RemoveContainer" containerID="bc2bb9eb1245fcacca712126b2f2c47b322a38b3c49c0cdde66c6f7cd3eab203" Jan 22 16:31:01 crc kubenswrapper[5032]: I0122 16:31:01.008278 5032 scope.go:117] "RemoveContainer" containerID="67e5da849d5ddc34d211ac57c737dcd64149d22b2dda6db11c6e79fd278456f5" Jan 22 16:31:02 crc kubenswrapper[5032]: I0122 16:31:02.468604 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" path="/var/lib/kubelet/pods/66a6f462-61b5-4b57-af6f-858294b300eb/volumes" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.017354 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.017466 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.017548 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.018772 5032 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.018952 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" gracePeriod=600 Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.165142 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478028 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478566 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="extract-content" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478585 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="extract-content" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478597 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="extract-content" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478605 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="extract-content" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478622 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478630 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478658 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="extract-utilities" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478666 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="extract-utilities" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478689 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478696 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.478712 5032 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="extract-utilities" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478720 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="extract-utilities" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.478977 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="58a29045-233e-4b3d-a7d4-acbfcce2dc7a" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.479014 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a6f462-61b5-4b57-af6f-858294b300eb" containerName="registry-server" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.480712 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.499499 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.585738 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.586039 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.586286 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbwct\" (UniqueName: \"kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.688292 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.688431 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbwct\" (UniqueName: \"kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.688516 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.688776 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.689040 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.711371 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbwct\" (UniqueName: \"kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct\") pod \"redhat-marketplace-ldvsm\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.838536 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.992379 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" exitCode=0 Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.992709 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403"} Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.992743 5032 scope.go:117] "RemoveContainer" containerID="faf935adc77a3c0abaa2d5b7ee2ef0557447fb464c98230ba8a8d7f0657845b1" Jan 22 16:31:03 crc kubenswrapper[5032]: I0122 16:31:03.993120 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:31:03 crc kubenswrapper[5032]: E0122 16:31:03.995281 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:31:04 crc kubenswrapper[5032]: I0122 16:31:04.311812 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:05 crc kubenswrapper[5032]: I0122 16:31:05.014112 5032 generic.go:334] "Generic (PLEG): container finished" podID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerID="e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b" exitCode=0 Jan 22 16:31:05 crc kubenswrapper[5032]: I0122 16:31:05.014229 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerDied","Data":"e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b"} Jan 22 16:31:05 crc kubenswrapper[5032]: I0122 16:31:05.014523 5032 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerStarted","Data":"91bb40d7d7eda5e3116e041ca48d6e986ea57bef11c981a3e6d9cad06a5751b0"} Jan 22 16:31:07 crc kubenswrapper[5032]: I0122 16:31:07.041817 5032 generic.go:334] "Generic (PLEG): container finished" podID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerID="47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663" exitCode=0 Jan 22 16:31:07 crc kubenswrapper[5032]: I0122 16:31:07.041910 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerDied","Data":"47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663"} Jan 22 16:31:08 crc kubenswrapper[5032]: I0122 16:31:08.056074 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerStarted","Data":"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04"} Jan 22 16:31:08 crc kubenswrapper[5032]: I0122 16:31:08.098976 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ldvsm" podStartSLOduration=2.329257061 podStartE2EDuration="5.098958513s" podCreationTimestamp="2026-01-22 16:31:03 +0000 UTC" firstStartedPulling="2026-01-22 16:31:05.024133151 +0000 UTC m=+1568.836302192" lastFinishedPulling="2026-01-22 16:31:07.793834613 +0000 UTC m=+1571.606003644" observedRunningTime="2026-01-22 16:31:08.083960293 +0000 UTC m=+1571.896129324" watchObservedRunningTime="2026-01-22 16:31:08.098958513 +0000 UTC m=+1571.911127544" Jan 22 16:31:13 crc kubenswrapper[5032]: I0122 16:31:13.839760 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:13 crc kubenswrapper[5032]: I0122 16:31:13.840306 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:13 crc kubenswrapper[5032]: I0122 16:31:13.934767 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:14 crc kubenswrapper[5032]: I0122 16:31:14.177228 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:14 crc kubenswrapper[5032]: I0122 16:31:14.228891 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.142966 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ldvsm" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="registry-server" containerID="cri-o://da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04" gracePeriod=2 Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.732314 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.886358 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities\") pod \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.886460 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content\") pod \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.886642 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbwct\" (UniqueName: \"kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct\") pod \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\" (UID: \"ac3e3154-4150-43fe-9a6f-d9c766aa5b64\") " Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.887204 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities" (OuterVolumeSpecName: "utilities") pod "ac3e3154-4150-43fe-9a6f-d9c766aa5b64" (UID: "ac3e3154-4150-43fe-9a6f-d9c766aa5b64"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.892098 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct" (OuterVolumeSpecName: "kube-api-access-jbwct") pod "ac3e3154-4150-43fe-9a6f-d9c766aa5b64" (UID: "ac3e3154-4150-43fe-9a6f-d9c766aa5b64"). InnerVolumeSpecName "kube-api-access-jbwct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.907716 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac3e3154-4150-43fe-9a6f-d9c766aa5b64" (UID: "ac3e3154-4150-43fe-9a6f-d9c766aa5b64"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.989362 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.989420 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:16 crc kubenswrapper[5032]: I0122 16:31:16.989445 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbwct\" (UniqueName: \"kubernetes.io/projected/ac3e3154-4150-43fe-9a6f-d9c766aa5b64-kube-api-access-jbwct\") on node \"crc\" DevicePath \"\"" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.159501 5032 generic.go:334] "Generic (PLEG): container finished" podID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerID="da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04" exitCode=0 Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.159565 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerDied","Data":"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04"} Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.159571 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ldvsm" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.159620 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ldvsm" event={"ID":"ac3e3154-4150-43fe-9a6f-d9c766aa5b64","Type":"ContainerDied","Data":"91bb40d7d7eda5e3116e041ca48d6e986ea57bef11c981a3e6d9cad06a5751b0"} Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.159652 5032 scope.go:117] "RemoveContainer" containerID="da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.197414 5032 scope.go:117] "RemoveContainer" containerID="47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.210963 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.220274 5032 scope.go:117] "RemoveContainer" containerID="e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.220628 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ldvsm"] Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.280384 5032 scope.go:117] "RemoveContainer" containerID="da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04" Jan 22 16:31:17 crc kubenswrapper[5032]: E0122 16:31:17.281355 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04\": container with ID starting with da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04 not found: ID does not exist" containerID="da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.281400 5032 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04"} err="failed to get container status \"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04\": rpc error: code = NotFound desc = could not find container \"da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04\": container with ID starting with da0c12fc8446c5078edbe47ad5afb768a7e2a852ce03b90a4512f4d9ceadaa04 not found: ID does not exist" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.281429 5032 scope.go:117] "RemoveContainer" containerID="47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663" Jan 22 16:31:17 crc kubenswrapper[5032]: E0122 16:31:17.281725 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663\": container with ID starting with 47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663 not found: ID does not exist" containerID="47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.281749 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663"} err="failed to get container status \"47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663\": rpc error: code = NotFound desc = could not find container \"47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663\": container with ID starting with 47a0323fe10092b1c92a4dc500a1a0e805397bfb31aeb49c2dd73d2d17d86663 not found: ID does not exist" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.281762 5032 scope.go:117] "RemoveContainer" containerID="e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b" Jan 22 16:31:17 crc kubenswrapper[5032]: E0122 16:31:17.282162 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b\": container with ID starting with e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b not found: ID does not exist" containerID="e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b" Jan 22 16:31:17 crc kubenswrapper[5032]: I0122 16:31:17.282183 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b"} err="failed to get container status \"e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b\": rpc error: code = NotFound desc = could not find container \"e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b\": container with ID starting with e099102634f1d7962ece53495ea5691127fb2b663a21026edba87575f4a7948b not found: ID does not exist" Jan 22 16:31:18 crc kubenswrapper[5032]: I0122 16:31:18.455966 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:31:18 crc kubenswrapper[5032]: E0122 16:31:18.456262 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:31:18 crc kubenswrapper[5032]: I0122 16:31:18.465834 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" path="/var/lib/kubelet/pods/ac3e3154-4150-43fe-9a6f-d9c766aa5b64/volumes" Jan 22 16:31:32 crc kubenswrapper[5032]: I0122 16:31:32.458694 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:31:32 crc kubenswrapper[5032]: E0122 16:31:32.460957 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:31:46 crc kubenswrapper[5032]: I0122 16:31:46.464297 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:31:46 crc kubenswrapper[5032]: E0122 16:31:46.465132 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:31:58 crc kubenswrapper[5032]: I0122 16:31:58.456516 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:31:58 crc kubenswrapper[5032]: E0122 16:31:58.457928 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:32:10 crc kubenswrapper[5032]: I0122 16:32:10.455058 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:32:10 crc kubenswrapper[5032]: E0122 16:32:10.455904 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:32:17 crc kubenswrapper[5032]: I0122 16:32:17.114289 5032 scope.go:117] "RemoveContainer" containerID="bdbda56f7f042772642f178e95bcd257a3772a4e7e3d2491046ad6124022a599" Jan 22 16:32:17 crc kubenswrapper[5032]: I0122 16:32:17.152780 5032 scope.go:117] "RemoveContainer" containerID="773d372463d1716aae14a710bae07c44cf2d257ce961dd6d66e3c5e16b866d31" Jan 22 16:32:17 crc 
kubenswrapper[5032]: I0122 16:32:17.183477 5032 scope.go:117] "RemoveContainer" containerID="e9c5547d277ef69a8e36044d8a21d259c0c38a16407229fd83db8a518eef339b" Jan 22 16:32:17 crc kubenswrapper[5032]: I0122 16:32:17.214921 5032 scope.go:117] "RemoveContainer" containerID="268d93dfa7c89d797585e318654b97b45dfa70c15ae4846046696fd59dbeb88b" Jan 22 16:32:24 crc kubenswrapper[5032]: I0122 16:32:24.455032 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:32:24 crc kubenswrapper[5032]: E0122 16:32:24.455826 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:32:39 crc kubenswrapper[5032]: I0122 16:32:39.454831 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:32:39 crc kubenswrapper[5032]: E0122 16:32:39.455748 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:32:52 crc kubenswrapper[5032]: I0122 16:32:52.455465 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:32:52 crc kubenswrapper[5032]: E0122 16:32:52.456278 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:32:56 crc kubenswrapper[5032]: I0122 16:32:56.318790 5032 generic.go:334] "Generic (PLEG): container finished" podID="547c061b-8a80-410b-b177-46281b19211c" containerID="86544a74ae28ff8e5ac35637e6b043c2dcecd86f21199f6ccddc46300b02a7ed" exitCode=0 Jan 22 16:32:56 crc kubenswrapper[5032]: I0122 16:32:56.319623 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" event={"ID":"547c061b-8a80-410b-b177-46281b19211c","Type":"ContainerDied","Data":"86544a74ae28ff8e5ac35637e6b043c2dcecd86f21199f6ccddc46300b02a7ed"} Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.760226 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.889677 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam\") pod \"547c061b-8a80-410b-b177-46281b19211c\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.889785 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle\") pod \"547c061b-8a80-410b-b177-46281b19211c\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.889938 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsnfz\" (UniqueName: \"kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz\") pod \"547c061b-8a80-410b-b177-46281b19211c\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.889979 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory\") pod \"547c061b-8a80-410b-b177-46281b19211c\" (UID: \"547c061b-8a80-410b-b177-46281b19211c\") " Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.898144 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz" (OuterVolumeSpecName: "kube-api-access-qsnfz") pod "547c061b-8a80-410b-b177-46281b19211c" (UID: "547c061b-8a80-410b-b177-46281b19211c"). InnerVolumeSpecName "kube-api-access-qsnfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.899983 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "547c061b-8a80-410b-b177-46281b19211c" (UID: "547c061b-8a80-410b-b177-46281b19211c"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.919599 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "547c061b-8a80-410b-b177-46281b19211c" (UID: "547c061b-8a80-410b-b177-46281b19211c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.923709 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory" (OuterVolumeSpecName: "inventory") pod "547c061b-8a80-410b-b177-46281b19211c" (UID: "547c061b-8a80-410b-b177-46281b19211c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.992730 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.992769 5032 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.992779 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsnfz\" (UniqueName: \"kubernetes.io/projected/547c061b-8a80-410b-b177-46281b19211c-kube-api-access-qsnfz\") on node \"crc\" DevicePath \"\"" Jan 22 16:32:57 crc kubenswrapper[5032]: I0122 16:32:57.992789 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/547c061b-8a80-410b-b177-46281b19211c-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.342853 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" event={"ID":"547c061b-8a80-410b-b177-46281b19211c","Type":"ContainerDied","Data":"9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b"} Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.342914 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ad45014940ab9745db7d93068e60b358548b9de87848e3c360a56ba0089329b" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.343084 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.432359 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt"] Jan 22 16:32:58 crc kubenswrapper[5032]: E0122 16:32:58.432788 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547c061b-8a80-410b-b177-46281b19211c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.432812 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="547c061b-8a80-410b-b177-46281b19211c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 16:32:58 crc kubenswrapper[5032]: E0122 16:32:58.432823 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="extract-utilities" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.432831 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="extract-utilities" Jan 22 16:32:58 crc kubenswrapper[5032]: E0122 16:32:58.432845 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="registry-server" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.432852 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="registry-server" Jan 22 16:32:58 crc kubenswrapper[5032]: E0122 16:32:58.432883 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="extract-content" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.432890 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="extract-content" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.433125 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="547c061b-8a80-410b-b177-46281b19211c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.433148 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac3e3154-4150-43fe-9a6f-d9c766aa5b64" containerName="registry-server" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.433888 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.437906 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.438324 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.440917 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.447049 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.470926 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt"] Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.502630 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxlt5\" (UniqueName: \"kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.502837 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.502933 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.605783 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.606001 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.606778 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxlt5\" (UniqueName: 
\"kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.610874 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.613265 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.634955 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxlt5\" (UniqueName: \"kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-msxgt\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:58 crc kubenswrapper[5032]: I0122 16:32:58.753753 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.048568 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8b2d-account-create-update-jldnb"] Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.061268 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-c2x7n"] Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.075540 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-8b2d-account-create-update-jldnb"] Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.085965 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-c2x7n"] Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.428171 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt"] Jan 22 16:32:59 crc kubenswrapper[5032]: I0122 16:32:59.491204 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.035698 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-k8lnb"] Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.059036 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-nsk8m"] Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.076232 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-k8lnb"] Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.088512 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-nsk8m"] Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.368483 
5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" event={"ID":"b5c32cc3-0b61-403e-ac6d-fcec5330aec1","Type":"ContainerStarted","Data":"fffd43c95b86478ea32b0a6be5cd89929ea53ae1890971c9c09cae4865081fbc"} Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.468230 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48dd1fea-2fab-4318-81f4-38c3a21eba22" path="/var/lib/kubelet/pods/48dd1fea-2fab-4318-81f4-38c3a21eba22/volumes" Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.469420 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91fbfb7d-ff4b-4d4e-9faf-dff333949712" path="/var/lib/kubelet/pods/91fbfb7d-ff4b-4d4e-9faf-dff333949712/volumes" Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.470621 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="926e4c07-ca1a-4f2f-9027-2e92f46eed39" path="/var/lib/kubelet/pods/926e4c07-ca1a-4f2f-9027-2e92f46eed39/volumes" Jan 22 16:33:00 crc kubenswrapper[5032]: I0122 16:33:00.472568 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e17a9ac9-bf21-4ea7-8801-8108d36d4aae" path="/var/lib/kubelet/pods/e17a9ac9-bf21-4ea7-8801-8108d36d4aae/volumes" Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.046951 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-fcb7-account-create-update-xphbq"] Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.057629 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-96eb-account-create-update-cms95"] Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.063032 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-96eb-account-create-update-cms95"] Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.070000 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-fcb7-account-create-update-xphbq"] Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.381398 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" event={"ID":"b5c32cc3-0b61-403e-ac6d-fcec5330aec1","Type":"ContainerStarted","Data":"90a95321f69c7fdbec344aea2bd76eebe475cfd73099a015e4e02cf9efd4585e"} Jan 22 16:33:01 crc kubenswrapper[5032]: I0122 16:33:01.410851 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" podStartSLOduration=2.687882981 podStartE2EDuration="3.410831252s" podCreationTimestamp="2026-01-22 16:32:58 +0000 UTC" firstStartedPulling="2026-01-22 16:32:59.49099645 +0000 UTC m=+1683.303165481" lastFinishedPulling="2026-01-22 16:33:00.213944681 +0000 UTC m=+1684.026113752" observedRunningTime="2026-01-22 16:33:01.397417403 +0000 UTC m=+1685.209586474" watchObservedRunningTime="2026-01-22 16:33:01.410831252 +0000 UTC m=+1685.223000293" Jan 22 16:33:02 crc kubenswrapper[5032]: I0122 16:33:02.470979 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64ab4105-77f7-4ee0-95a9-9e8a08acd357" path="/var/lib/kubelet/pods/64ab4105-77f7-4ee0-95a9-9e8a08acd357/volumes" Jan 22 16:33:02 crc kubenswrapper[5032]: I0122 16:33:02.473548 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="981bbcc4-7806-4b54-956f-e686bbf0c7f1" path="/var/lib/kubelet/pods/981bbcc4-7806-4b54-956f-e686bbf0c7f1/volumes" Jan 22 16:33:04 crc kubenswrapper[5032]: I0122 16:33:04.455533 5032 
scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:33:04 crc kubenswrapper[5032]: E0122 16:33:04.456386 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:33:08 crc kubenswrapper[5032]: I0122 16:33:08.068757 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-7lw6c"] Jan 22 16:33:08 crc kubenswrapper[5032]: I0122 16:33:08.081434 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-7lw6c"] Jan 22 16:33:08 crc kubenswrapper[5032]: I0122 16:33:08.475152 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c50ae02a-9b33-400e-963a-aaa815cd061b" path="/var/lib/kubelet/pods/c50ae02a-9b33-400e-963a-aaa815cd061b/volumes" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.303825 5032 scope.go:117] "RemoveContainer" containerID="f2b8db2f9f32bd8ef318a34f79bc0d86746744be0801a396c7c45e264aa82478" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.332537 5032 scope.go:117] "RemoveContainer" containerID="969b14fa1ad50b1be028371d8276f66999fd32c47906c108fa16300e2ffb028d" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.408503 5032 scope.go:117] "RemoveContainer" containerID="b3e4430ebef0d9a3446951916cb63c0c4c9b41882e1356df3049a1da004b1cc4" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.444063 5032 scope.go:117] "RemoveContainer" containerID="cf04f36add102f7cac258558524de49e83ed544592df4f0a512d08cf888e0f15" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.480586 5032 scope.go:117] "RemoveContainer" containerID="10ea5ea2e8f110d10708d62edec99683c4190ea064d5681acc8161135c8b7f9a" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.508968 5032 scope.go:117] "RemoveContainer" containerID="59c1ad5ed1f15ac53a2c3f12d4fc978baac2ff3cd7f1329e79f963195fa4b69e" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.558087 5032 scope.go:117] "RemoveContainer" containerID="2da6cfe59fed5b6d6a5385f7fde3732110ed34469232d3adbab7978cd6163ed6" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.587218 5032 scope.go:117] "RemoveContainer" containerID="7efb84d1b82111866f026c33af2d9539a002bdb2acf94c0544340f74c3eecd1e" Jan 22 16:33:17 crc kubenswrapper[5032]: I0122 16:33:17.608439 5032 scope.go:117] "RemoveContainer" containerID="faabb8e0a26b6d47c4c7c74e794d371a4bc189e9f7aa1e4d93748b2f2fa5a517" Jan 22 16:33:18 crc kubenswrapper[5032]: I0122 16:33:18.455089 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:33:18 crc kubenswrapper[5032]: E0122 16:33:18.455825 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.045129 5032 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-8qf6r"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.059959 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-lvq7q"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.072661 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-11dc-account-create-update-n66d9"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.080318 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-lvq7q"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.096728 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-15b8-account-create-update-tk2d2"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.110563 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-15b8-account-create-update-tk2d2"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.110619 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-11dc-account-create-update-n66d9"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.110630 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-8qf6r"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.119119 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-jkm9n"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.126123 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-jkm9n"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.140577 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d2fb-account-create-update-96qf2"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.140623 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d2fb-account-create-update-96qf2"] Jan 22 16:33:29 crc kubenswrapper[5032]: I0122 16:33:29.456195 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:33:29 crc kubenswrapper[5032]: E0122 16:33:29.456965 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.473719 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09a04ba4-1451-4a64-9f1a-4f06fa96c352" path="/var/lib/kubelet/pods/09a04ba4-1451-4a64-9f1a-4f06fa96c352/volumes" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.475361 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cd3c01c-a661-4710-81fe-92a4b7af8073" path="/var/lib/kubelet/pods/1cd3c01c-a661-4710-81fe-92a4b7af8073/volumes" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.476721 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53" path="/var/lib/kubelet/pods/1ebb92cf-7cfa-4d87-995f-eef5fbd8ec53/volumes" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.477927 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="527109ed-e0d3-45c7-b9b4-ae78b90e28d3" 
path="/var/lib/kubelet/pods/527109ed-e0d3-45c7-b9b4-ae78b90e28d3/volumes" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.480005 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99ab8480-ed26-4f7e-8086-30d0d1c2cf4b" path="/var/lib/kubelet/pods/99ab8480-ed26-4f7e-8086-30d0d1c2cf4b/volumes" Jan 22 16:33:30 crc kubenswrapper[5032]: I0122 16:33:30.481110 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aebc03fb-af9a-4652-8649-d8e2275faa46" path="/var/lib/kubelet/pods/aebc03fb-af9a-4652-8649-d8e2275faa46/volumes" Jan 22 16:33:33 crc kubenswrapper[5032]: I0122 16:33:33.052428 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-vjptd"] Jan 22 16:33:33 crc kubenswrapper[5032]: I0122 16:33:33.063068 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-vjptd"] Jan 22 16:33:34 crc kubenswrapper[5032]: I0122 16:33:34.466436 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f0b2b5-fc74-4525-bf25-5d00abc5c89c" path="/var/lib/kubelet/pods/b8f0b2b5-fc74-4525-bf25-5d00abc5c89c/volumes" Jan 22 16:33:36 crc kubenswrapper[5032]: I0122 16:33:36.033540 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-9zk6w"] Jan 22 16:33:36 crc kubenswrapper[5032]: I0122 16:33:36.042390 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-9zk6w"] Jan 22 16:33:36 crc kubenswrapper[5032]: I0122 16:33:36.469474 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="300400f9-f92b-4159-982c-310398e166c8" path="/var/lib/kubelet/pods/300400f9-f92b-4159-982c-310398e166c8/volumes" Jan 22 16:33:42 crc kubenswrapper[5032]: I0122 16:33:42.455129 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:33:42 crc kubenswrapper[5032]: E0122 16:33:42.456401 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:33:53 crc kubenswrapper[5032]: I0122 16:33:53.455614 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:33:53 crc kubenswrapper[5032]: E0122 16:33:53.457253 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:34:05 crc kubenswrapper[5032]: I0122 16:34:05.454520 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:34:05 crc kubenswrapper[5032]: E0122 16:34:05.455294 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:34:16 crc kubenswrapper[5032]: I0122 16:34:16.471758 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:34:16 crc kubenswrapper[5032]: E0122 16:34:16.473018 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:34:17 crc kubenswrapper[5032]: I0122 16:34:17.779765 5032 scope.go:117] "RemoveContainer" containerID="97959e7aa9727703f3f1a95f91a2dc69d3e21db93dd2f95f7769fe44222a07f0" Jan 22 16:34:17 crc kubenswrapper[5032]: I0122 16:34:17.821432 5032 scope.go:117] "RemoveContainer" containerID="e2bf8296ca86e8d3119644f07167aa50ba55dcddcfddf9d8a0f16b99461e7370" Jan 22 16:34:17 crc kubenswrapper[5032]: I0122 16:34:17.905043 5032 scope.go:117] "RemoveContainer" containerID="11843ef879c0aee64b416fe7c872b45eadec752543a0f84304c2801c8816f927" Jan 22 16:34:17 crc kubenswrapper[5032]: I0122 16:34:17.980700 5032 scope.go:117] "RemoveContainer" containerID="8aee5ed8e0588ab1fa8d0fbdfd8a61d020ff2d997763e44fd9470465c0197cbf" Jan 22 16:34:18 crc kubenswrapper[5032]: I0122 16:34:18.008687 5032 scope.go:117] "RemoveContainer" containerID="821a6564675ec50faf16ea7cc5a0fc3fa26aeb191eb526ad55e028df360200a9" Jan 22 16:34:18 crc kubenswrapper[5032]: I0122 16:34:18.044933 5032 scope.go:117] "RemoveContainer" containerID="fcc49d9fdf4773cd4c396d472b82808c15a0c7090fe3fab93499a48475821aca" Jan 22 16:34:18 crc kubenswrapper[5032]: I0122 16:34:18.094186 5032 scope.go:117] "RemoveContainer" containerID="a6a095be6930dff471c0024b2ae1b31cbb75fd7cfdb5e65c1b672ea125fed651" Jan 22 16:34:18 crc kubenswrapper[5032]: I0122 16:34:18.117056 5032 scope.go:117] "RemoveContainer" containerID="20a77bc4b7f46522e4d6e6b01923b2f208f37916ac1495cb697dc3f9d3e6299b" Jan 22 16:34:25 crc kubenswrapper[5032]: I0122 16:34:25.069223 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-mk9tw"] Jan 22 16:34:25 crc kubenswrapper[5032]: I0122 16:34:25.083074 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-mk9tw"] Jan 22 16:34:26 crc kubenswrapper[5032]: I0122 16:34:26.473111 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6edd56b3-2854-4b22-8d59-b620200d7cfe" path="/var/lib/kubelet/pods/6edd56b3-2854-4b22-8d59-b620200d7cfe/volumes" Jan 22 16:34:29 crc kubenswrapper[5032]: I0122 16:34:29.454474 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:34:29 crc kubenswrapper[5032]: E0122 16:34:29.455391 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:34:31 crc kubenswrapper[5032]: I0122 16:34:31.036496 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ng56d"] Jan 22 16:34:31 crc kubenswrapper[5032]: I0122 16:34:31.045742 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ng56d"] Jan 22 16:34:32 crc kubenswrapper[5032]: I0122 16:34:32.476550 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18e81df2-1e39-45a9-9ea9-c9b9a54b92f7" path="/var/lib/kubelet/pods/18e81df2-1e39-45a9-9ea9-c9b9a54b92f7/volumes" Jan 22 16:34:40 crc kubenswrapper[5032]: I0122 16:34:40.060339 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-gf5jg"] Jan 22 16:34:40 crc kubenswrapper[5032]: I0122 16:34:40.072758 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-gf5jg"] Jan 22 16:34:40 crc kubenswrapper[5032]: I0122 16:34:40.468928 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b048493-c081-48d3-a908-70bf0efeba64" path="/var/lib/kubelet/pods/8b048493-c081-48d3-a908-70bf0efeba64/volumes" Jan 22 16:34:43 crc kubenswrapper[5032]: I0122 16:34:43.454947 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:34:43 crc kubenswrapper[5032]: E0122 16:34:43.455990 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:34:44 crc kubenswrapper[5032]: I0122 16:34:44.038883 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-jb65g"] Jan 22 16:34:44 crc kubenswrapper[5032]: I0122 16:34:44.048451 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-jb65g"] Jan 22 16:34:44 crc kubenswrapper[5032]: I0122 16:34:44.471021 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="175d212f-92e2-468a-ac51-5c821d160842" path="/var/lib/kubelet/pods/175d212f-92e2-468a-ac51-5c821d160842/volumes" Jan 22 16:34:45 crc kubenswrapper[5032]: I0122 16:34:45.511908 5032 generic.go:334] "Generic (PLEG): container finished" podID="b5c32cc3-0b61-403e-ac6d-fcec5330aec1" containerID="90a95321f69c7fdbec344aea2bd76eebe475cfd73099a015e4e02cf9efd4585e" exitCode=0 Jan 22 16:34:45 crc kubenswrapper[5032]: I0122 16:34:45.511918 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" event={"ID":"b5c32cc3-0b61-403e-ac6d-fcec5330aec1","Type":"ContainerDied","Data":"90a95321f69c7fdbec344aea2bd76eebe475cfd73099a015e4e02cf9efd4585e"} Jan 22 16:34:46 crc kubenswrapper[5032]: I0122 16:34:46.979849 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.103058 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxlt5\" (UniqueName: \"kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5\") pod \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.103423 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory\") pod \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.103487 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam\") pod \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\" (UID: \"b5c32cc3-0b61-403e-ac6d-fcec5330aec1\") " Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.111437 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5" (OuterVolumeSpecName: "kube-api-access-cxlt5") pod "b5c32cc3-0b61-403e-ac6d-fcec5330aec1" (UID: "b5c32cc3-0b61-403e-ac6d-fcec5330aec1"). InnerVolumeSpecName "kube-api-access-cxlt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.149728 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory" (OuterVolumeSpecName: "inventory") pod "b5c32cc3-0b61-403e-ac6d-fcec5330aec1" (UID: "b5c32cc3-0b61-403e-ac6d-fcec5330aec1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.153722 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b5c32cc3-0b61-403e-ac6d-fcec5330aec1" (UID: "b5c32cc3-0b61-403e-ac6d-fcec5330aec1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.206608 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.206665 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.206694 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxlt5\" (UniqueName: \"kubernetes.io/projected/b5c32cc3-0b61-403e-ac6d-fcec5330aec1-kube-api-access-cxlt5\") on node \"crc\" DevicePath \"\"" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.553256 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" event={"ID":"b5c32cc3-0b61-403e-ac6d-fcec5330aec1","Type":"ContainerDied","Data":"fffd43c95b86478ea32b0a6be5cd89929ea53ae1890971c9c09cae4865081fbc"} Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.553311 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fffd43c95b86478ea32b0a6be5cd89929ea53ae1890971c9c09cae4865081fbc" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.553410 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-msxgt" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.654781 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n"] Jan 22 16:34:47 crc kubenswrapper[5032]: E0122 16:34:47.655256 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5c32cc3-0b61-403e-ac6d-fcec5330aec1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.655277 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5c32cc3-0b61-403e-ac6d-fcec5330aec1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.655461 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5c32cc3-0b61-403e-ac6d-fcec5330aec1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.656113 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.659036 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.659419 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.661528 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.661819 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.668462 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n"] Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.818059 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.818190 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsrqg\" (UniqueName: \"kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.818378 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.920953 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.921226 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.921334 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsrqg\" (UniqueName: 
\"kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.927123 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.940659 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.942057 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsrqg\" (UniqueName: \"kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j284n\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:47 crc kubenswrapper[5032]: I0122 16:34:47.975778 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:34:48 crc kubenswrapper[5032]: I0122 16:34:48.684896 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n"] Jan 22 16:34:49 crc kubenswrapper[5032]: I0122 16:34:49.579626 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" event={"ID":"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c","Type":"ContainerStarted","Data":"72429d04770f827e5018170fba928a26c97c3a4e5d24cdc09d2628166ab537e3"} Jan 22 16:34:51 crc kubenswrapper[5032]: I0122 16:34:51.068189 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-dxmk2"] Jan 22 16:34:51 crc kubenswrapper[5032]: I0122 16:34:51.087617 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-dxmk2"] Jan 22 16:34:51 crc kubenswrapper[5032]: I0122 16:34:51.618313 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" event={"ID":"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c","Type":"ContainerStarted","Data":"952a2ea319931f63009ff13502caa1555776017b308410068a5c664889bf9ada"} Jan 22 16:34:51 crc kubenswrapper[5032]: I0122 16:34:51.651446 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" podStartSLOduration=2.90984086 podStartE2EDuration="4.651414388s" podCreationTimestamp="2026-01-22 16:34:47 +0000 UTC" firstStartedPulling="2026-01-22 16:34:48.67576365 +0000 UTC m=+1792.487932711" lastFinishedPulling="2026-01-22 16:34:50.417337178 +0000 UTC m=+1794.229506239" 
observedRunningTime="2026-01-22 16:34:51.642847347 +0000 UTC m=+1795.455016398" watchObservedRunningTime="2026-01-22 16:34:51.651414388 +0000 UTC m=+1795.463583469" Jan 22 16:34:52 crc kubenswrapper[5032]: I0122 16:34:52.474718 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="857e311d-5f3f-4c81-9f86-6d0a3c9e0307" path="/var/lib/kubelet/pods/857e311d-5f3f-4c81-9f86-6d0a3c9e0307/volumes" Jan 22 16:34:54 crc kubenswrapper[5032]: I0122 16:34:54.455463 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:34:54 crc kubenswrapper[5032]: E0122 16:34:54.456047 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:35:08 crc kubenswrapper[5032]: I0122 16:35:08.454750 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:35:08 crc kubenswrapper[5032]: E0122 16:35:08.455511 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:35:18 crc kubenswrapper[5032]: I0122 16:35:18.281033 5032 scope.go:117] "RemoveContainer" containerID="993447f33f973d6e3fee0c7551a69d446c00f519c48b60bfa88f5947350f5fef" Jan 22 16:35:18 crc kubenswrapper[5032]: I0122 16:35:18.331151 5032 scope.go:117] "RemoveContainer" containerID="138bf192d4d9cd4c98df6e324c87216f0d2deb4d054507fb90afd38369a65db0" Jan 22 16:35:18 crc kubenswrapper[5032]: I0122 16:35:18.445303 5032 scope.go:117] "RemoveContainer" containerID="27e06268d6656feccfcd5f2b92f2c1d29cc816b29fe34dce87415dfe46f214d7" Jan 22 16:35:18 crc kubenswrapper[5032]: I0122 16:35:18.510398 5032 scope.go:117] "RemoveContainer" containerID="0ff743aaa4eaee22f03fe812ea2d5b92cb2e3024f4648a0020790760250a0f33" Jan 22 16:35:18 crc kubenswrapper[5032]: I0122 16:35:18.549022 5032 scope.go:117] "RemoveContainer" containerID="0185960f97620fac9d4721bdb8a6837e4405402ad3a81960afc937ec6f06b8cb" Jan 22 16:35:19 crc kubenswrapper[5032]: I0122 16:35:19.054726 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dq8h2"] Jan 22 16:35:19 crc kubenswrapper[5032]: I0122 16:35:19.065688 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-rvk27"] Jan 22 16:35:19 crc kubenswrapper[5032]: I0122 16:35:19.077335 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dq8h2"] Jan 22 16:35:19 crc kubenswrapper[5032]: I0122 16:35:19.094353 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-rvk27"] Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.063379 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ad31-account-create-update-lgnzz"] Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.073235 5032 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-3697-account-create-update-mdn8x"] Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.083375 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ad31-account-create-update-lgnzz"] Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.093034 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-3697-account-create-update-mdn8x"] Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.468377 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4994a5dc-21af-4f0d-a705-0a8b2f8c26bc" path="/var/lib/kubelet/pods/4994a5dc-21af-4f0d-a705-0a8b2f8c26bc/volumes" Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.469445 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b900cbd-8e74-4b76-8bb6-99c355ac6ebd" path="/var/lib/kubelet/pods/7b900cbd-8e74-4b76-8bb6-99c355ac6ebd/volumes" Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.470275 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8655adea-0114-43b1-a101-47535eda5bc5" path="/var/lib/kubelet/pods/8655adea-0114-43b1-a101-47535eda5bc5/volumes" Jan 22 16:35:20 crc kubenswrapper[5032]: I0122 16:35:20.471119 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa13677f-87b8-46f1-85fa-07b79dfd6ac8" path="/var/lib/kubelet/pods/aa13677f-87b8-46f1-85fa-07b79dfd6ac8/volumes" Jan 22 16:35:21 crc kubenswrapper[5032]: I0122 16:35:21.038779 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-a042-account-create-update-zrqtx"] Jan 22 16:35:21 crc kubenswrapper[5032]: I0122 16:35:21.056117 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-vpdhx"] Jan 22 16:35:21 crc kubenswrapper[5032]: I0122 16:35:21.071007 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-a042-account-create-update-zrqtx"] Jan 22 16:35:21 crc kubenswrapper[5032]: I0122 16:35:21.081978 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-vpdhx"] Jan 22 16:35:22 crc kubenswrapper[5032]: I0122 16:35:22.455796 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:35:22 crc kubenswrapper[5032]: E0122 16:35:22.456279 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:35:22 crc kubenswrapper[5032]: I0122 16:35:22.469095 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37939e63-b34f-4b59-bebe-b99b19dd1b5f" path="/var/lib/kubelet/pods/37939e63-b34f-4b59-bebe-b99b19dd1b5f/volumes" Jan 22 16:35:22 crc kubenswrapper[5032]: I0122 16:35:22.470271 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec68387f-04fc-4330-9b84-233dc14a5835" path="/var/lib/kubelet/pods/ec68387f-04fc-4330-9b84-233dc14a5835/volumes" Jan 22 16:35:35 crc kubenswrapper[5032]: I0122 16:35:35.455040 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:35:35 crc kubenswrapper[5032]: E0122 
16:35:35.455978 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:35:48 crc kubenswrapper[5032]: I0122 16:35:48.455768 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:35:48 crc kubenswrapper[5032]: E0122 16:35:48.457009 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:35:50 crc kubenswrapper[5032]: I0122 16:35:50.071813 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-92g57"] Jan 22 16:35:50 crc kubenswrapper[5032]: I0122 16:35:50.081784 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-92g57"] Jan 22 16:35:50 crc kubenswrapper[5032]: I0122 16:35:50.466988 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c83b824-d7c9-413d-abdf-353330608097" path="/var/lib/kubelet/pods/6c83b824-d7c9-413d-abdf-353330608097/volumes" Jan 22 16:36:03 crc kubenswrapper[5032]: I0122 16:36:03.455979 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:36:04 crc kubenswrapper[5032]: I0122 16:36:04.418657 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd"} Jan 22 16:36:05 crc kubenswrapper[5032]: I0122 16:36:05.426985 5032 generic.go:334] "Generic (PLEG): container finished" podID="d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" containerID="952a2ea319931f63009ff13502caa1555776017b308410068a5c664889bf9ada" exitCode=0 Jan 22 16:36:05 crc kubenswrapper[5032]: I0122 16:36:05.427120 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" event={"ID":"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c","Type":"ContainerDied","Data":"952a2ea319931f63009ff13502caa1555776017b308410068a5c664889bf9ada"} Jan 22 16:36:06 crc kubenswrapper[5032]: I0122 16:36:06.968595 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.112519 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory\") pod \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.112698 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam\") pod \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.112846 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsrqg\" (UniqueName: \"kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg\") pod \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\" (UID: \"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c\") " Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.123260 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg" (OuterVolumeSpecName: "kube-api-access-xsrqg") pod "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" (UID: "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c"). InnerVolumeSpecName "kube-api-access-xsrqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.143134 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory" (OuterVolumeSpecName: "inventory") pod "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" (UID: "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.168566 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" (UID: "d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.215684 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsrqg\" (UniqueName: \"kubernetes.io/projected/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-kube-api-access-xsrqg\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.215741 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.215759 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.446237 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" event={"ID":"d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c","Type":"ContainerDied","Data":"72429d04770f827e5018170fba928a26c97c3a4e5d24cdc09d2628166ab537e3"} Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.446273 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72429d04770f827e5018170fba928a26c97c3a4e5d24cdc09d2628166ab537e3" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.446295 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j284n" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.568571 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s"] Jan 22 16:36:07 crc kubenswrapper[5032]: E0122 16:36:07.570444 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.570478 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.571469 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.573656 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.576933 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.577443 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.579558 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.580557 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.598991 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s"] Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.728124 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.728569 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7zg4\" (UniqueName: \"kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.728640 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.830440 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7zg4\" (UniqueName: \"kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.830496 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.830552 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.834598 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.834634 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.850763 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7zg4\" (UniqueName: \"kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fp87s\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:07 crc kubenswrapper[5032]: I0122 16:36:07.902355 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:08 crc kubenswrapper[5032]: I0122 16:36:08.439007 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s"] Jan 22 16:36:08 crc kubenswrapper[5032]: W0122 16:36:08.440926 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd38e2b23_9fdc_47dd_9865_abf873d8bc74.slice/crio-289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0 WatchSource:0}: Error finding container 289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0: Status 404 returned error can't find the container with id 289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0 Jan 22 16:36:08 crc kubenswrapper[5032]: I0122 16:36:08.465330 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" event={"ID":"d38e2b23-9fdc-47dd-9865-abf873d8bc74","Type":"ContainerStarted","Data":"289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0"} Jan 22 16:36:10 crc kubenswrapper[5032]: I0122 16:36:10.485939 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" event={"ID":"d38e2b23-9fdc-47dd-9865-abf873d8bc74","Type":"ContainerStarted","Data":"5b3b45ab771cd36679d0216560d8c3a7e17a3d97c8967604f1f7fdcd14be50d4"} Jan 22 16:36:10 crc kubenswrapper[5032]: I0122 16:36:10.508382 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" podStartSLOduration=2.387259325 podStartE2EDuration="3.508356927s" 
podCreationTimestamp="2026-01-22 16:36:07 +0000 UTC" firstStartedPulling="2026-01-22 16:36:08.443050267 +0000 UTC m=+1872.255219298" lastFinishedPulling="2026-01-22 16:36:09.564147859 +0000 UTC m=+1873.376316900" observedRunningTime="2026-01-22 16:36:10.506058095 +0000 UTC m=+1874.318227166" watchObservedRunningTime="2026-01-22 16:36:10.508356927 +0000 UTC m=+1874.320525988" Jan 22 16:36:15 crc kubenswrapper[5032]: I0122 16:36:15.540393 5032 generic.go:334] "Generic (PLEG): container finished" podID="d38e2b23-9fdc-47dd-9865-abf873d8bc74" containerID="5b3b45ab771cd36679d0216560d8c3a7e17a3d97c8967604f1f7fdcd14be50d4" exitCode=0 Jan 22 16:36:15 crc kubenswrapper[5032]: I0122 16:36:15.540506 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" event={"ID":"d38e2b23-9fdc-47dd-9865-abf873d8bc74","Type":"ContainerDied","Data":"5b3b45ab771cd36679d0216560d8c3a7e17a3d97c8967604f1f7fdcd14be50d4"} Jan 22 16:36:16 crc kubenswrapper[5032]: I0122 16:36:16.979456 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.022762 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam\") pod \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.022893 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7zg4\" (UniqueName: \"kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4\") pod \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.022930 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory\") pod \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\" (UID: \"d38e2b23-9fdc-47dd-9865-abf873d8bc74\") " Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.030130 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4" (OuterVolumeSpecName: "kube-api-access-n7zg4") pod "d38e2b23-9fdc-47dd-9865-abf873d8bc74" (UID: "d38e2b23-9fdc-47dd-9865-abf873d8bc74"). InnerVolumeSpecName "kube-api-access-n7zg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.065907 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory" (OuterVolumeSpecName: "inventory") pod "d38e2b23-9fdc-47dd-9865-abf873d8bc74" (UID: "d38e2b23-9fdc-47dd-9865-abf873d8bc74"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.076085 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d38e2b23-9fdc-47dd-9865-abf873d8bc74" (UID: "d38e2b23-9fdc-47dd-9865-abf873d8bc74"). 
InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.124946 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.124979 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7zg4\" (UniqueName: \"kubernetes.io/projected/d38e2b23-9fdc-47dd-9865-abf873d8bc74-kube-api-access-n7zg4\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.124988 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d38e2b23-9fdc-47dd-9865-abf873d8bc74-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.573518 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" event={"ID":"d38e2b23-9fdc-47dd-9865-abf873d8bc74","Type":"ContainerDied","Data":"289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0"} Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.573564 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="289da1f388db584dfc9b452cbd7db44c38bea7c84bd2b1b51a74161cfee533c0" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.573628 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fp87s" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.671467 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97"] Jan 22 16:36:17 crc kubenswrapper[5032]: E0122 16:36:17.672310 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38e2b23-9fdc-47dd-9865-abf873d8bc74" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.672348 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38e2b23-9fdc-47dd-9865-abf873d8bc74" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.672669 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d38e2b23-9fdc-47dd-9865-abf873d8bc74" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.673522 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.676236 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.676571 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.676722 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.682395 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.695492 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97"] Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.835882 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx82n\" (UniqueName: \"kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.835954 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.836207 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.938270 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx82n\" (UniqueName: \"kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.938337 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.938391 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.946757 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.948137 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.955785 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx82n\" (UniqueName: \"kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-9nk97\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:17 crc kubenswrapper[5032]: I0122 16:36:17.997987 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.347894 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97"] Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.584273 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" event={"ID":"3843c5b3-e955-466a-8341-8e13e817172c","Type":"ContainerStarted","Data":"a891810f0c6e3d42dbcb1f08bdbc817c1f170cf81c72a82272503259503732f4"} Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.675462 5032 scope.go:117] "RemoveContainer" containerID="c7c7ce6416591cd56092d334e386f19884997d315b89732143ae16acd4ecfa2f" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.712069 5032 scope.go:117] "RemoveContainer" containerID="6a3627df4bdb5b3993ef997e2e469602d28ae59018f0d2fe9c7c3168933e28f1" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.775052 5032 scope.go:117] "RemoveContainer" containerID="852d664b9848105063e60d29ac3c01239ccb746805d76676e0183d98c236a3da" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.812131 5032 scope.go:117] "RemoveContainer" containerID="93196dcce23769bcc71bea177c91a1481839ceca3bd0c4ab6b6ba303de2321e9" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.839711 5032 scope.go:117] "RemoveContainer" containerID="f6487cef5f478908a3389988e4e0bc886923c3dc838968d8f65b0a6367d12a0a" Jan 22 16:36:18 crc kubenswrapper[5032]: I0122 16:36:18.938624 5032 scope.go:117] "RemoveContainer" containerID="73c5ea290912fe47dd080acfe335601d0fa52ebce0753bb288d78e30b55a73f8" Jan 22 16:36:19 crc kubenswrapper[5032]: I0122 16:36:19.016071 5032 scope.go:117] "RemoveContainer" containerID="093f290055f12fb95d7d1ebcfe1e49a76f1e349fddd5d0b5272b0c6c1089ec55" Jan 22 16:36:19 crc kubenswrapper[5032]: I0122 16:36:19.595107 5032 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" event={"ID":"3843c5b3-e955-466a-8341-8e13e817172c","Type":"ContainerStarted","Data":"11b07ee83446977740954cc69055ffb24e248393f32eaa951e91070482b5c44f"} Jan 22 16:36:19 crc kubenswrapper[5032]: I0122 16:36:19.618834 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" podStartSLOduration=1.878550174 podStartE2EDuration="2.618803649s" podCreationTimestamp="2026-01-22 16:36:17 +0000 UTC" firstStartedPulling="2026-01-22 16:36:18.363946613 +0000 UTC m=+1882.176115644" lastFinishedPulling="2026-01-22 16:36:19.104200088 +0000 UTC m=+1882.916369119" observedRunningTime="2026-01-22 16:36:19.613410544 +0000 UTC m=+1883.425579585" watchObservedRunningTime="2026-01-22 16:36:19.618803649 +0000 UTC m=+1883.430972710" Jan 22 16:36:45 crc kubenswrapper[5032]: I0122 16:36:45.057210 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8h9gw"] Jan 22 16:36:45 crc kubenswrapper[5032]: I0122 16:36:45.076571 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh97r"] Jan 22 16:36:45 crc kubenswrapper[5032]: I0122 16:36:45.087348 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh97r"] Jan 22 16:36:45 crc kubenswrapper[5032]: I0122 16:36:45.104615 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8h9gw"] Jan 22 16:36:46 crc kubenswrapper[5032]: I0122 16:36:46.479306 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66e893a9-0ee3-4c0e-98d7-5d85d75b46c3" path="/var/lib/kubelet/pods/66e893a9-0ee3-4c0e-98d7-5d85d75b46c3/volumes" Jan 22 16:36:46 crc kubenswrapper[5032]: I0122 16:36:46.483727 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2acebf7-3c11-4c28-b0fc-980c3a05ab71" path="/var/lib/kubelet/pods/c2acebf7-3c11-4c28-b0fc-980c3a05ab71/volumes" Jan 22 16:37:02 crc kubenswrapper[5032]: I0122 16:37:02.059408 5032 generic.go:334] "Generic (PLEG): container finished" podID="3843c5b3-e955-466a-8341-8e13e817172c" containerID="11b07ee83446977740954cc69055ffb24e248393f32eaa951e91070482b5c44f" exitCode=0 Jan 22 16:37:02 crc kubenswrapper[5032]: I0122 16:37:02.059559 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" event={"ID":"3843c5b3-e955-466a-8341-8e13e817172c","Type":"ContainerDied","Data":"11b07ee83446977740954cc69055ffb24e248393f32eaa951e91070482b5c44f"} Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.614168 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.749257 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory\") pod \"3843c5b3-e955-466a-8341-8e13e817172c\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.749451 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx82n\" (UniqueName: \"kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n\") pod \"3843c5b3-e955-466a-8341-8e13e817172c\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.749548 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam\") pod \"3843c5b3-e955-466a-8341-8e13e817172c\" (UID: \"3843c5b3-e955-466a-8341-8e13e817172c\") " Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.757765 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n" (OuterVolumeSpecName: "kube-api-access-jx82n") pod "3843c5b3-e955-466a-8341-8e13e817172c" (UID: "3843c5b3-e955-466a-8341-8e13e817172c"). InnerVolumeSpecName "kube-api-access-jx82n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.798164 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "3843c5b3-e955-466a-8341-8e13e817172c" (UID: "3843c5b3-e955-466a-8341-8e13e817172c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.801268 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory" (OuterVolumeSpecName: "inventory") pod "3843c5b3-e955-466a-8341-8e13e817172c" (UID: "3843c5b3-e955-466a-8341-8e13e817172c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.852625 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx82n\" (UniqueName: \"kubernetes.io/projected/3843c5b3-e955-466a-8341-8e13e817172c-kube-api-access-jx82n\") on node \"crc\" DevicePath \"\"" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.852694 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:37:03 crc kubenswrapper[5032]: I0122 16:37:03.852726 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3843c5b3-e955-466a-8341-8e13e817172c-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.090277 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" event={"ID":"3843c5b3-e955-466a-8341-8e13e817172c","Type":"ContainerDied","Data":"a891810f0c6e3d42dbcb1f08bdbc817c1f170cf81c72a82272503259503732f4"} Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.090646 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a891810f0c6e3d42dbcb1f08bdbc817c1f170cf81c72a82272503259503732f4" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.090361 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-9nk97" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.316415 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x"] Jan 22 16:37:04 crc kubenswrapper[5032]: E0122 16:37:04.317567 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3843c5b3-e955-466a-8341-8e13e817172c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.317615 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="3843c5b3-e955-466a-8341-8e13e817172c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.318168 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="3843c5b3-e955-466a-8341-8e13e817172c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.319526 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.323472 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.323844 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.326841 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.327314 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.330989 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x"] Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.474112 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.474204 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.474269 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.576363 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.577133 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.579583 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.586482 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.586532 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.602551 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qv94x\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:04 crc kubenswrapper[5032]: I0122 16:37:04.657404 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:37:05 crc kubenswrapper[5032]: I0122 16:37:05.004984 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x"] Jan 22 16:37:05 crc kubenswrapper[5032]: I0122 16:37:05.103118 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" event={"ID":"8b6774b8-1373-4f87-980c-18189e328582","Type":"ContainerStarted","Data":"5506348ed44525f4e224fb5b4d7d235182cc4785e6d2d328f6cc13fb6d4f373d"} Jan 22 16:37:06 crc kubenswrapper[5032]: I0122 16:37:06.118034 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" event={"ID":"8b6774b8-1373-4f87-980c-18189e328582","Type":"ContainerStarted","Data":"879261a9e04cabb3971ae3bb75fe9e373dcfa62ba1cacbf9125a2baedd80fdff"} Jan 22 16:37:06 crc kubenswrapper[5032]: I0122 16:37:06.151518 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" podStartSLOduration=1.70278675 podStartE2EDuration="2.151485799s" podCreationTimestamp="2026-01-22 16:37:04 +0000 UTC" firstStartedPulling="2026-01-22 16:37:05.01895222 +0000 UTC m=+1928.831121261" lastFinishedPulling="2026-01-22 16:37:05.467651229 +0000 UTC m=+1929.279820310" observedRunningTime="2026-01-22 16:37:06.143941656 +0000 UTC m=+1929.956110757" watchObservedRunningTime="2026-01-22 16:37:06.151485799 +0000 UTC m=+1929.963654870" Jan 22 16:37:19 crc kubenswrapper[5032]: I0122 16:37:19.262079 5032 scope.go:117] "RemoveContainer" containerID="bb2391b5fc691f89772bee8eb5d77072a576646b365bc2c9786db02ec1ad39b1" Jan 22 16:37:19 crc kubenswrapper[5032]: I0122 16:37:19.310443 
5032 scope.go:117] "RemoveContainer" containerID="a0584988e7977e4d36817b6c17073626e3a40a7be839a06231d9027ec0e7c06c" Jan 22 16:37:32 crc kubenswrapper[5032]: I0122 16:37:32.041920 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-tdfk2"] Jan 22 16:37:32 crc kubenswrapper[5032]: I0122 16:37:32.051489 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-tdfk2"] Jan 22 16:37:32 crc kubenswrapper[5032]: I0122 16:37:32.466259 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e41215ab-724d-4efa-a10c-b1573c1bc3c7" path="/var/lib/kubelet/pods/e41215ab-724d-4efa-a10c-b1573c1bc3c7/volumes" Jan 22 16:38:03 crc kubenswrapper[5032]: I0122 16:38:03.016803 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:38:03 crc kubenswrapper[5032]: I0122 16:38:03.017528 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:38:03 crc kubenswrapper[5032]: I0122 16:38:03.746825 5032 generic.go:334] "Generic (PLEG): container finished" podID="8b6774b8-1373-4f87-980c-18189e328582" containerID="879261a9e04cabb3971ae3bb75fe9e373dcfa62ba1cacbf9125a2baedd80fdff" exitCode=0 Jan 22 16:38:03 crc kubenswrapper[5032]: I0122 16:38:03.746923 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" event={"ID":"8b6774b8-1373-4f87-980c-18189e328582","Type":"ContainerDied","Data":"879261a9e04cabb3971ae3bb75fe9e373dcfa62ba1cacbf9125a2baedd80fdff"} Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.223131 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.340498 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam\") pod \"8b6774b8-1373-4f87-980c-18189e328582\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.340572 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8\") pod \"8b6774b8-1373-4f87-980c-18189e328582\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.340725 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory\") pod \"8b6774b8-1373-4f87-980c-18189e328582\" (UID: \"8b6774b8-1373-4f87-980c-18189e328582\") " Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.359345 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8" (OuterVolumeSpecName: "kube-api-access-r8zs8") pod "8b6774b8-1373-4f87-980c-18189e328582" (UID: "8b6774b8-1373-4f87-980c-18189e328582"). InnerVolumeSpecName "kube-api-access-r8zs8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.369890 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8b6774b8-1373-4f87-980c-18189e328582" (UID: "8b6774b8-1373-4f87-980c-18189e328582"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.392782 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory" (OuterVolumeSpecName: "inventory") pod "8b6774b8-1373-4f87-980c-18189e328582" (UID: "8b6774b8-1373-4f87-980c-18189e328582"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.443611 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.443668 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b6774b8-1373-4f87-980c-18189e328582-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.443692 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/8b6774b8-1373-4f87-980c-18189e328582-kube-api-access-r8zs8\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.801933 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" event={"ID":"8b6774b8-1373-4f87-980c-18189e328582","Type":"ContainerDied","Data":"5506348ed44525f4e224fb5b4d7d235182cc4785e6d2d328f6cc13fb6d4f373d"} Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.802322 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5506348ed44525f4e224fb5b4d7d235182cc4785e6d2d328f6cc13fb6d4f373d" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.802226 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qv94x" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.935887 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-94c2m"] Jan 22 16:38:05 crc kubenswrapper[5032]: E0122 16:38:05.936280 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b6774b8-1373-4f87-980c-18189e328582" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.936299 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b6774b8-1373-4f87-980c-18189e328582" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.936506 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b6774b8-1373-4f87-980c-18189e328582" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.937215 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.940030 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.940282 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.940443 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.944840 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:38:05 crc kubenswrapper[5032]: I0122 16:38:05.958694 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-94c2m"] Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.057836 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.057980 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.058030 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnb7p\" (UniqueName: \"kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.159668 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.159754 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.159799 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnb7p\" (UniqueName: \"kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc 
kubenswrapper[5032]: I0122 16:38:06.166497 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.168480 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.177979 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnb7p\" (UniqueName: \"kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p\") pod \"ssh-known-hosts-edpm-deployment-94c2m\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.265545 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.842324 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-94c2m"] Jan 22 16:38:06 crc kubenswrapper[5032]: W0122 16:38:06.850191 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod805a1eec_0fc3_4f40_8b05_92d0911b636f.slice/crio-4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1 WatchSource:0}: Error finding container 4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1: Status 404 returned error can't find the container with id 4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1 Jan 22 16:38:06 crc kubenswrapper[5032]: I0122 16:38:06.853144 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:38:07 crc kubenswrapper[5032]: I0122 16:38:07.827512 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" event={"ID":"805a1eec-0fc3-4f40-8b05-92d0911b636f","Type":"ContainerStarted","Data":"4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1"} Jan 22 16:38:08 crc kubenswrapper[5032]: I0122 16:38:08.840958 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" event={"ID":"805a1eec-0fc3-4f40-8b05-92d0911b636f","Type":"ContainerStarted","Data":"8a93a5acea078c0e70e5aa539b8adedf650fdcaa52c98058a6e3bd970284d60b"} Jan 22 16:38:08 crc kubenswrapper[5032]: I0122 16:38:08.877121 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" podStartSLOduration=3.298557014 podStartE2EDuration="3.87710559s" podCreationTimestamp="2026-01-22 16:38:05 +0000 UTC" firstStartedPulling="2026-01-22 16:38:06.852907024 +0000 UTC m=+1990.665076055" lastFinishedPulling="2026-01-22 16:38:07.4314556 +0000 UTC m=+1991.243624631" observedRunningTime="2026-01-22 16:38:08.870285678 +0000 UTC m=+1992.682454749" watchObservedRunningTime="2026-01-22 16:38:08.87710559 +0000 UTC m=+1992.689274621" 
Jan 22 16:38:16 crc kubenswrapper[5032]: I0122 16:38:16.937392 5032 generic.go:334] "Generic (PLEG): container finished" podID="805a1eec-0fc3-4f40-8b05-92d0911b636f" containerID="8a93a5acea078c0e70e5aa539b8adedf650fdcaa52c98058a6e3bd970284d60b" exitCode=0 Jan 22 16:38:16 crc kubenswrapper[5032]: I0122 16:38:16.937505 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" event={"ID":"805a1eec-0fc3-4f40-8b05-92d0911b636f","Type":"ContainerDied","Data":"8a93a5acea078c0e70e5aa539b8adedf650fdcaa52c98058a6e3bd970284d60b"} Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.453065 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.554260 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0\") pod \"805a1eec-0fc3-4f40-8b05-92d0911b636f\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.554352 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnb7p\" (UniqueName: \"kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p\") pod \"805a1eec-0fc3-4f40-8b05-92d0911b636f\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.554460 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam\") pod \"805a1eec-0fc3-4f40-8b05-92d0911b636f\" (UID: \"805a1eec-0fc3-4f40-8b05-92d0911b636f\") " Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.563887 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p" (OuterVolumeSpecName: "kube-api-access-bnb7p") pod "805a1eec-0fc3-4f40-8b05-92d0911b636f" (UID: "805a1eec-0fc3-4f40-8b05-92d0911b636f"). InnerVolumeSpecName "kube-api-access-bnb7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.586534 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "805a1eec-0fc3-4f40-8b05-92d0911b636f" (UID: "805a1eec-0fc3-4f40-8b05-92d0911b636f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.588877 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "805a1eec-0fc3-4f40-8b05-92d0911b636f" (UID: "805a1eec-0fc3-4f40-8b05-92d0911b636f"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.656540 5032 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.656580 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnb7p\" (UniqueName: \"kubernetes.io/projected/805a1eec-0fc3-4f40-8b05-92d0911b636f-kube-api-access-bnb7p\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.656596 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/805a1eec-0fc3-4f40-8b05-92d0911b636f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.967339 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" event={"ID":"805a1eec-0fc3-4f40-8b05-92d0911b636f","Type":"ContainerDied","Data":"4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1"} Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.967693 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fe00b82121d41d5c4528ec041b18de8dbe5d5256fca52c8566c33d31314a7d1" Jan 22 16:38:18 crc kubenswrapper[5032]: I0122 16:38:18.967787 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-94c2m" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.054890 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7"] Jan 22 16:38:19 crc kubenswrapper[5032]: E0122 16:38:19.055611 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="805a1eec-0fc3-4f40-8b05-92d0911b636f" containerName="ssh-known-hosts-edpm-deployment" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.055711 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="805a1eec-0fc3-4f40-8b05-92d0911b636f" containerName="ssh-known-hosts-edpm-deployment" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.056061 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="805a1eec-0fc3-4f40-8b05-92d0911b636f" containerName="ssh-known-hosts-edpm-deployment" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.058548 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.061774 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.061818 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.062570 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.064141 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.065228 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7"] Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.167239 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.167325 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxmfj\" (UniqueName: \"kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.167738 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.270603 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.270805 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.271087 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxmfj\" (UniqueName: \"kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.277065 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.277422 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.297108 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxmfj\" (UniqueName: \"kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-rwnd7\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.409318 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:19 crc kubenswrapper[5032]: I0122 16:38:19.415180 5032 scope.go:117] "RemoveContainer" containerID="0fe190f1cbe52c0ec01dc502c0056bc43e43d23391f6e62a0684eb31a5a28db7" Jan 22 16:38:20 crc kubenswrapper[5032]: I0122 16:38:20.012370 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7"] Jan 22 16:38:20 crc kubenswrapper[5032]: I0122 16:38:20.984655 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" event={"ID":"86b36ce9-5adf-443b-99ec-11d21696562b","Type":"ContainerStarted","Data":"291704d365291aabc44a39a09d9a3a368dd5a4903b47fbd67738c0b9c6acf0da"} Jan 22 16:38:20 crc kubenswrapper[5032]: I0122 16:38:20.985047 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" event={"ID":"86b36ce9-5adf-443b-99ec-11d21696562b","Type":"ContainerStarted","Data":"8dcf8632146a7e26e76cc3baabbfa92347147102dbe0e315bc896d01ff5c68de"} Jan 22 16:38:21 crc kubenswrapper[5032]: I0122 16:38:21.022968 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" podStartSLOduration=1.532147169 podStartE2EDuration="2.02292333s" podCreationTimestamp="2026-01-22 16:38:19 +0000 UTC" firstStartedPulling="2026-01-22 16:38:20.019004982 +0000 UTC m=+2003.831174033" lastFinishedPulling="2026-01-22 16:38:20.509781143 +0000 UTC m=+2004.321950194" observedRunningTime="2026-01-22 16:38:21.011840104 +0000 UTC m=+2004.824009145" watchObservedRunningTime="2026-01-22 16:38:21.02292333 +0000 UTC m=+2004.835092371" Jan 22 16:38:30 crc kubenswrapper[5032]: I0122 16:38:30.065885 5032 generic.go:334] "Generic (PLEG): container finished" podID="86b36ce9-5adf-443b-99ec-11d21696562b" 
containerID="291704d365291aabc44a39a09d9a3a368dd5a4903b47fbd67738c0b9c6acf0da" exitCode=0 Jan 22 16:38:30 crc kubenswrapper[5032]: I0122 16:38:30.066037 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" event={"ID":"86b36ce9-5adf-443b-99ec-11d21696562b","Type":"ContainerDied","Data":"291704d365291aabc44a39a09d9a3a368dd5a4903b47fbd67738c0b9c6acf0da"} Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.606537 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.731375 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam\") pod \"86b36ce9-5adf-443b-99ec-11d21696562b\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.731436 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxmfj\" (UniqueName: \"kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj\") pod \"86b36ce9-5adf-443b-99ec-11d21696562b\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.731728 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory\") pod \"86b36ce9-5adf-443b-99ec-11d21696562b\" (UID: \"86b36ce9-5adf-443b-99ec-11d21696562b\") " Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.744156 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj" (OuterVolumeSpecName: "kube-api-access-bxmfj") pod "86b36ce9-5adf-443b-99ec-11d21696562b" (UID: "86b36ce9-5adf-443b-99ec-11d21696562b"). InnerVolumeSpecName "kube-api-access-bxmfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.765507 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "86b36ce9-5adf-443b-99ec-11d21696562b" (UID: "86b36ce9-5adf-443b-99ec-11d21696562b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.767414 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory" (OuterVolumeSpecName: "inventory") pod "86b36ce9-5adf-443b-99ec-11d21696562b" (UID: "86b36ce9-5adf-443b-99ec-11d21696562b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.834382 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.834432 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86b36ce9-5adf-443b-99ec-11d21696562b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:31 crc kubenswrapper[5032]: I0122 16:38:31.834449 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxmfj\" (UniqueName: \"kubernetes.io/projected/86b36ce9-5adf-443b-99ec-11d21696562b-kube-api-access-bxmfj\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.093145 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" event={"ID":"86b36ce9-5adf-443b-99ec-11d21696562b","Type":"ContainerDied","Data":"8dcf8632146a7e26e76cc3baabbfa92347147102dbe0e315bc896d01ff5c68de"} Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.093203 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-rwnd7" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.093215 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dcf8632146a7e26e76cc3baabbfa92347147102dbe0e315bc896d01ff5c68de" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.186030 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6"] Jan 22 16:38:32 crc kubenswrapper[5032]: E0122 16:38:32.186975 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86b36ce9-5adf-443b-99ec-11d21696562b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.187006 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="86b36ce9-5adf-443b-99ec-11d21696562b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.187375 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="86b36ce9-5adf-443b-99ec-11d21696562b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.188449 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.191270 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.191758 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.192742 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.193747 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.201633 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6"] Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.245013 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.245090 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7sx8\" (UniqueName: \"kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.245178 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.347343 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.347431 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7sx8\" (UniqueName: \"kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.347489 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.353103 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.353113 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.365418 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7sx8\" (UniqueName: \"kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:32 crc kubenswrapper[5032]: I0122 16:38:32.517771 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:33 crc kubenswrapper[5032]: I0122 16:38:33.016548 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:38:33 crc kubenswrapper[5032]: I0122 16:38:33.016807 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:38:33 crc kubenswrapper[5032]: I0122 16:38:33.051296 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6"] Jan 22 16:38:33 crc kubenswrapper[5032]: I0122 16:38:33.105182 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" event={"ID":"befce4e5-991c-438e-ac82-fa7f75091418","Type":"ContainerStarted","Data":"d5fb96fa7d651c74e5d806d5490aa2b4d58d4271b68b8c017d2821459dffc565"} Jan 22 16:38:34 crc kubenswrapper[5032]: I0122 16:38:34.117338 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" event={"ID":"befce4e5-991c-438e-ac82-fa7f75091418","Type":"ContainerStarted","Data":"ae1a6521f26287ae26a56d035075c83520d0b34fb5f8f65385ffe922726981cb"} Jan 22 16:38:34 crc kubenswrapper[5032]: I0122 16:38:34.144568 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" podStartSLOduration=1.729988485 
podStartE2EDuration="2.14453529s" podCreationTimestamp="2026-01-22 16:38:32 +0000 UTC" firstStartedPulling="2026-01-22 16:38:33.071449912 +0000 UTC m=+2016.883618963" lastFinishedPulling="2026-01-22 16:38:33.485996707 +0000 UTC m=+2017.298165768" observedRunningTime="2026-01-22 16:38:34.142757092 +0000 UTC m=+2017.954926163" watchObservedRunningTime="2026-01-22 16:38:34.14453529 +0000 UTC m=+2017.956704371" Jan 22 16:38:44 crc kubenswrapper[5032]: I0122 16:38:44.220190 5032 generic.go:334] "Generic (PLEG): container finished" podID="befce4e5-991c-438e-ac82-fa7f75091418" containerID="ae1a6521f26287ae26a56d035075c83520d0b34fb5f8f65385ffe922726981cb" exitCode=0 Jan 22 16:38:44 crc kubenswrapper[5032]: I0122 16:38:44.220291 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" event={"ID":"befce4e5-991c-438e-ac82-fa7f75091418","Type":"ContainerDied","Data":"ae1a6521f26287ae26a56d035075c83520d0b34fb5f8f65385ffe922726981cb"} Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.673736 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.844121 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory\") pod \"befce4e5-991c-438e-ac82-fa7f75091418\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.844608 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam\") pod \"befce4e5-991c-438e-ac82-fa7f75091418\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.844739 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7sx8\" (UniqueName: \"kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8\") pod \"befce4e5-991c-438e-ac82-fa7f75091418\" (UID: \"befce4e5-991c-438e-ac82-fa7f75091418\") " Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.850370 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8" (OuterVolumeSpecName: "kube-api-access-x7sx8") pod "befce4e5-991c-438e-ac82-fa7f75091418" (UID: "befce4e5-991c-438e-ac82-fa7f75091418"). InnerVolumeSpecName "kube-api-access-x7sx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.875948 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory" (OuterVolumeSpecName: "inventory") pod "befce4e5-991c-438e-ac82-fa7f75091418" (UID: "befce4e5-991c-438e-ac82-fa7f75091418"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.878318 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "befce4e5-991c-438e-ac82-fa7f75091418" (UID: "befce4e5-991c-438e-ac82-fa7f75091418"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.947913 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.947975 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/befce4e5-991c-438e-ac82-fa7f75091418-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:45 crc kubenswrapper[5032]: I0122 16:38:45.947993 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7sx8\" (UniqueName: \"kubernetes.io/projected/befce4e5-991c-438e-ac82-fa7f75091418-kube-api-access-x7sx8\") on node \"crc\" DevicePath \"\"" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.258110 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" event={"ID":"befce4e5-991c-438e-ac82-fa7f75091418","Type":"ContainerDied","Data":"d5fb96fa7d651c74e5d806d5490aa2b4d58d4271b68b8c017d2821459dffc565"} Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.258173 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5fb96fa7d651c74e5d806d5490aa2b4d58d4271b68b8c017d2821459dffc565" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.258205 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.415188 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd"] Jan 22 16:38:46 crc kubenswrapper[5032]: E0122 16:38:46.415944 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="befce4e5-991c-438e-ac82-fa7f75091418" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.415978 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="befce4e5-991c-438e-ac82-fa7f75091418" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.416293 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="befce4e5-991c-438e-ac82-fa7f75091418" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.417365 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.422285 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.422538 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.423551 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.423746 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.423915 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.424086 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.424225 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.424357 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.435133 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd"] Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478138 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478197 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478268 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhkb4\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478289 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478361 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478418 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478465 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478500 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478624 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478655 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478697 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: 
\"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478771 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478810 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.478878 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.580555 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.581062 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.581269 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.581505 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: 
I0122 16:38:46.581708 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.582005 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.582171 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.582398 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhkb4\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.582955 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.583170 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.583358 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.583505 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.583613 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.583807 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.588024 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.588370 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.588704 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.589704 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.589707 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.590130 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.591247 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.591555 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.591962 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.593227 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.593397 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.594130 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.595114 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.609243 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhkb4\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:46 crc kubenswrapper[5032]: I0122 16:38:46.743532 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:38:47 crc kubenswrapper[5032]: W0122 16:38:47.085257 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f8f814b_117a_4c75_a79f_bec5d67320e9.slice/crio-5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca WatchSource:0}: Error finding container 5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca: Status 404 returned error can't find the container with id 5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca Jan 22 16:38:47 crc kubenswrapper[5032]: I0122 16:38:47.091533 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd"] Jan 22 16:38:47 crc kubenswrapper[5032]: I0122 16:38:47.266797 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" event={"ID":"1f8f814b-117a-4c75-a79f-bec5d67320e9","Type":"ContainerStarted","Data":"5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca"} Jan 22 16:38:48 crc kubenswrapper[5032]: I0122 16:38:48.278211 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" event={"ID":"1f8f814b-117a-4c75-a79f-bec5d67320e9","Type":"ContainerStarted","Data":"630a2cd93d5abaec2bd9524a2f55af4c0fe22332eb9744684d6e3fa4613f5058"} Jan 22 16:38:48 crc kubenswrapper[5032]: I0122 16:38:48.300922 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" podStartSLOduration=1.6162907340000001 podStartE2EDuration="2.300902083s" podCreationTimestamp="2026-01-22 16:38:46 +0000 UTC" firstStartedPulling="2026-01-22 16:38:47.087588829 +0000 UTC m=+2030.899757900" lastFinishedPulling="2026-01-22 16:38:47.772200218 +0000 UTC m=+2031.584369249" observedRunningTime="2026-01-22 16:38:48.293946977 +0000 UTC m=+2032.106116018" watchObservedRunningTime="2026-01-22 16:38:48.300902083 +0000 UTC m=+2032.113071134" Jan 22 16:39:03 crc kubenswrapper[5032]: I0122 16:39:03.017483 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:39:03 crc kubenswrapper[5032]: I0122 16:39:03.018275 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:39:03 crc kubenswrapper[5032]: I0122 16:39:03.018342 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:39:03 crc kubenswrapper[5032]: I0122 16:39:03.019433 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:39:03 crc kubenswrapper[5032]: I0122 16:39:03.019542 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd" gracePeriod=600 Jan 22 16:39:04 crc kubenswrapper[5032]: I0122 16:39:04.448613 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd" exitCode=0 Jan 22 16:39:04 crc kubenswrapper[5032]: I0122 16:39:04.448731 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd"} Jan 22 16:39:04 crc kubenswrapper[5032]: I0122 16:39:04.449021 5032 scope.go:117] "RemoveContainer" containerID="8518ad2d993a9622a561dbfa80d8c491415d1f9c23531cb006b87173518eb403" Jan 22 16:39:05 crc kubenswrapper[5032]: I0122 16:39:05.461834 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709"} Jan 22 16:39:31 crc kubenswrapper[5032]: I0122 16:39:31.698848 5032 generic.go:334] "Generic (PLEG): container finished" podID="1f8f814b-117a-4c75-a79f-bec5d67320e9" containerID="630a2cd93d5abaec2bd9524a2f55af4c0fe22332eb9744684d6e3fa4613f5058" exitCode=0 Jan 22 16:39:31 crc kubenswrapper[5032]: I0122 16:39:31.698951 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" event={"ID":"1f8f814b-117a-4c75-a79f-bec5d67320e9","Type":"ContainerDied","Data":"630a2cd93d5abaec2bd9524a2f55af4c0fe22332eb9744684d6e3fa4613f5058"} Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.174477 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.289926 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhkb4\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.289975 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290015 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290033 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290063 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290115 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290140 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290168 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290201 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: 
\"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290221 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290250 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290283 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290310 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.290339 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle\") pod \"1f8f814b-117a-4c75-a79f-bec5d67320e9\" (UID: \"1f8f814b-117a-4c75-a79f-bec5d67320e9\") " Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.296544 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.296557 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.297537 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4" (OuterVolumeSpecName: "kube-api-access-xhkb4") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "kube-api-access-xhkb4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.299539 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.300020 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.300108 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.300219 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.300754 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.301738 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.302921 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.308490 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.319543 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.337104 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory" (OuterVolumeSpecName: "inventory") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.349929 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1f8f814b-117a-4c75-a79f-bec5d67320e9" (UID: "1f8f814b-117a-4c75-a79f-bec5d67320e9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393213 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhkb4\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-kube-api-access-xhkb4\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393262 5032 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393285 5032 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393306 5032 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393326 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393345 5032 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393366 5032 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393385 5032 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393404 5032 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393421 5032 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393442 5032 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/1f8f814b-117a-4c75-a79f-bec5d67320e9-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393459 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393478 5032 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.393496 5032 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f814b-117a-4c75-a79f-bec5d67320e9-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.724119 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" event={"ID":"1f8f814b-117a-4c75-a79f-bec5d67320e9","Type":"ContainerDied","Data":"5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca"} Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.724367 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f8fc4a0465eb5aa6aee542229db6bc014a348fe44a1d9e028d17c3fb22163ca" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.724323 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.855904 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2"] Jan 22 16:39:33 crc kubenswrapper[5032]: E0122 16:39:33.856302 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8f814b-117a-4c75-a79f-bec5d67320e9" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.856318 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8f814b-117a-4c75-a79f-bec5d67320e9" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.856495 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8f814b-117a-4c75-a79f-bec5d67320e9" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.857231 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.861476 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.861747 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.861978 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.862290 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.862455 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:39:33 crc kubenswrapper[5032]: I0122 16:39:33.866885 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2"] Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.005708 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.005818 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.005935 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvxvz\" (UniqueName: \"kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.006003 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.006078 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.107489 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.107596 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.107659 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvxvz\" (UniqueName: \"kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.107712 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.107748 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.109221 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.113499 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.124569 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.124950 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.140239 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvxvz\" (UniqueName: \"kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8kkc2\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.174361 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.707015 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2"] Jan 22 16:39:34 crc kubenswrapper[5032]: I0122 16:39:34.733372 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" event={"ID":"1641ce01-3281-4f60-a092-48478c3b7fbb","Type":"ContainerStarted","Data":"8e8fc6c49e10a72a5cd81534d13f93b5d53461b5cbc880e7a2f35b726ca4b4c5"} Jan 22 16:39:35 crc kubenswrapper[5032]: I0122 16:39:35.747209 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" event={"ID":"1641ce01-3281-4f60-a092-48478c3b7fbb","Type":"ContainerStarted","Data":"0eba043bf8129efab4df76f19c5d6a5b814b9ff89265690ec9eef24ca51a3c6c"} Jan 22 16:39:35 crc kubenswrapper[5032]: I0122 16:39:35.780785 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" podStartSLOduration=2.332482901 podStartE2EDuration="2.780759627s" podCreationTimestamp="2026-01-22 16:39:33 +0000 UTC" firstStartedPulling="2026-01-22 16:39:34.700087168 +0000 UTC m=+2078.512256199" lastFinishedPulling="2026-01-22 16:39:35.148363884 +0000 UTC m=+2078.960532925" observedRunningTime="2026-01-22 16:39:35.76289013 +0000 UTC m=+2079.575059171" watchObservedRunningTime="2026-01-22 16:39:35.780759627 +0000 UTC m=+2079.592928678" Jan 22 16:40:45 crc kubenswrapper[5032]: I0122 16:40:45.464021 5032 generic.go:334] "Generic (PLEG): container finished" podID="1641ce01-3281-4f60-a092-48478c3b7fbb" containerID="0eba043bf8129efab4df76f19c5d6a5b814b9ff89265690ec9eef24ca51a3c6c" exitCode=0 Jan 22 16:40:45 crc kubenswrapper[5032]: I0122 16:40:45.464142 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" event={"ID":"1641ce01-3281-4f60-a092-48478c3b7fbb","Type":"ContainerDied","Data":"0eba043bf8129efab4df76f19c5d6a5b814b9ff89265690ec9eef24ca51a3c6c"} Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.002048 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.147113 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory\") pod \"1641ce01-3281-4f60-a092-48478c3b7fbb\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.147171 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam\") pod \"1641ce01-3281-4f60-a092-48478c3b7fbb\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.147252 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvxvz\" (UniqueName: \"kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz\") pod \"1641ce01-3281-4f60-a092-48478c3b7fbb\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.147308 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0\") pod \"1641ce01-3281-4f60-a092-48478c3b7fbb\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.147391 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle\") pod \"1641ce01-3281-4f60-a092-48478c3b7fbb\" (UID: \"1641ce01-3281-4f60-a092-48478c3b7fbb\") " Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.155052 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz" (OuterVolumeSpecName: "kube-api-access-gvxvz") pod "1641ce01-3281-4f60-a092-48478c3b7fbb" (UID: "1641ce01-3281-4f60-a092-48478c3b7fbb"). InnerVolumeSpecName "kube-api-access-gvxvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.163133 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "1641ce01-3281-4f60-a092-48478c3b7fbb" (UID: "1641ce01-3281-4f60-a092-48478c3b7fbb"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.174096 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "1641ce01-3281-4f60-a092-48478c3b7fbb" (UID: "1641ce01-3281-4f60-a092-48478c3b7fbb"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.176870 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory" (OuterVolumeSpecName: "inventory") pod "1641ce01-3281-4f60-a092-48478c3b7fbb" (UID: "1641ce01-3281-4f60-a092-48478c3b7fbb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.185453 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1641ce01-3281-4f60-a092-48478c3b7fbb" (UID: "1641ce01-3281-4f60-a092-48478c3b7fbb"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.249632 5032 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.249669 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.249684 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1641ce01-3281-4f60-a092-48478c3b7fbb-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.249695 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvxvz\" (UniqueName: \"kubernetes.io/projected/1641ce01-3281-4f60-a092-48478c3b7fbb-kube-api-access-gvxvz\") on node \"crc\" DevicePath \"\"" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.249703 5032 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1641ce01-3281-4f60-a092-48478c3b7fbb-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.481980 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" event={"ID":"1641ce01-3281-4f60-a092-48478c3b7fbb","Type":"ContainerDied","Data":"8e8fc6c49e10a72a5cd81534d13f93b5d53461b5cbc880e7a2f35b726ca4b4c5"} Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.482621 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e8fc6c49e10a72a5cd81534d13f93b5d53461b5cbc880e7a2f35b726ca4b4c5" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.482129 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8kkc2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.612564 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2"] Jan 22 16:40:47 crc kubenswrapper[5032]: E0122 16:40:47.612951 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1641ce01-3281-4f60-a092-48478c3b7fbb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.612967 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1641ce01-3281-4f60-a092-48478c3b7fbb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.613267 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1641ce01-3281-4f60-a092-48478c3b7fbb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.613997 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619237 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619238 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619437 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619565 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619791 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.619921 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.629800 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2"] Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757237 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757355 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757685 5032 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757807 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdpcf\" (UniqueName: \"kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757841 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.757930 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859309 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859372 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859458 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859496 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdpcf\" (UniqueName: 
\"kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859516 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.859930 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.865433 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.865553 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.866171 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.866816 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.872186 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.887848 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdpcf\" (UniqueName: \"kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:47 crc kubenswrapper[5032]: I0122 16:40:47.944591 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:40:48 crc kubenswrapper[5032]: I0122 16:40:48.495028 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2"] Jan 22 16:40:49 crc kubenswrapper[5032]: I0122 16:40:49.503607 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" event={"ID":"69dc6df4-16ad-4811-af1a-561a56a0e0c6","Type":"ContainerStarted","Data":"bd42bf389566708af1fae41c25918ac1243e31119a41aac68c6f87fc97c93b7f"} Jan 22 16:40:49 crc kubenswrapper[5032]: I0122 16:40:49.504551 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" event={"ID":"69dc6df4-16ad-4811-af1a-561a56a0e0c6","Type":"ContainerStarted","Data":"a0206f26b93b8c9ff45f66067cef36fb54400c7358430f25f7300873db4f3cb5"} Jan 22 16:40:49 crc kubenswrapper[5032]: I0122 16:40:49.536853 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" podStartSLOduration=2.09571483 podStartE2EDuration="2.536828444s" podCreationTimestamp="2026-01-22 16:40:47 +0000 UTC" firstStartedPulling="2026-01-22 16:40:48.497269042 +0000 UTC m=+2152.309438073" lastFinishedPulling="2026-01-22 16:40:48.938382626 +0000 UTC m=+2152.750551687" observedRunningTime="2026-01-22 16:40:49.534617055 +0000 UTC m=+2153.346786096" watchObservedRunningTime="2026-01-22 16:40:49.536828444 +0000 UTC m=+2153.348997515" Jan 22 16:41:33 crc kubenswrapper[5032]: I0122 16:41:33.016752 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:41:33 crc kubenswrapper[5032]: I0122 16:41:33.017452 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:41:44 crc kubenswrapper[5032]: I0122 16:41:44.089114 5032 generic.go:334] "Generic (PLEG): container finished" podID="69dc6df4-16ad-4811-af1a-561a56a0e0c6" containerID="bd42bf389566708af1fae41c25918ac1243e31119a41aac68c6f87fc97c93b7f" exitCode=0 Jan 22 16:41:44 crc kubenswrapper[5032]: I0122 16:41:44.089224 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" 
event={"ID":"69dc6df4-16ad-4811-af1a-561a56a0e0c6","Type":"ContainerDied","Data":"bd42bf389566708af1fae41c25918ac1243e31119a41aac68c6f87fc97c93b7f"} Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.457632 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.462995 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.469409 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.593681 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.593939 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.595298 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.595567 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsjxb\" (UniqueName: \"kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697041 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697162 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697214 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697302 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697370 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697484 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdpcf\" (UniqueName: \"kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf\") pod \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\" (UID: \"69dc6df4-16ad-4811-af1a-561a56a0e0c6\") " Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.697893 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.698016 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsjxb\" (UniqueName: \"kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.698137 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.698525 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.698678 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.704811 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf" (OuterVolumeSpecName: "kube-api-access-rdpcf") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "kube-api-access-rdpcf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.704967 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.715310 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsjxb\" (UniqueName: \"kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb\") pod \"community-operators-7wcmv\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.724757 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.727349 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.739340 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory" (OuterVolumeSpecName: "inventory") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.741943 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "69dc6df4-16ad-4811-af1a-561a56a0e0c6" (UID: "69dc6df4-16ad-4811-af1a-561a56a0e0c6"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800087 5032 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800116 5032 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800132 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800144 5032 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800157 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdpcf\" (UniqueName: \"kubernetes.io/projected/69dc6df4-16ad-4811-af1a-561a56a0e0c6-kube-api-access-rdpcf\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.800171 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/69dc6df4-16ad-4811-af1a-561a56a0e0c6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:45 crc kubenswrapper[5032]: I0122 16:41:45.886444 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.112762 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" event={"ID":"69dc6df4-16ad-4811-af1a-561a56a0e0c6","Type":"ContainerDied","Data":"a0206f26b93b8c9ff45f66067cef36fb54400c7358430f25f7300873db4f3cb5"} Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.113116 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0206f26b93b8c9ff45f66067cef36fb54400c7358430f25f7300873db4f3cb5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.112819 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.309180 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5"] Jan 22 16:41:46 crc kubenswrapper[5032]: E0122 16:41:46.309582 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69dc6df4-16ad-4811-af1a-561a56a0e0c6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.309594 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="69dc6df4-16ad-4811-af1a-561a56a0e0c6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.309796 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="69dc6df4-16ad-4811-af1a-561a56a0e0c6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.316562 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.323272 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.323391 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.323434 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.323468 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.323284 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.337668 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5"] Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.381960 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.411990 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.412088 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.412140 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.412180 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.412226 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f28qr\" (UniqueName: \"kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.514237 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.514696 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.514746 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.514802 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.514888 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f28qr\" (UniqueName: \"kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.521645 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.522200 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.522278 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.528428 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.538556 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f28qr\" (UniqueName: \"kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:46 crc kubenswrapper[5032]: I0122 16:41:46.644226 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:41:47 crc kubenswrapper[5032]: I0122 16:41:47.131049 5032 generic.go:334] "Generic (PLEG): container finished" podID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerID="c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7" exitCode=0 Jan 22 16:41:47 crc kubenswrapper[5032]: I0122 16:41:47.131178 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerDied","Data":"c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7"} Jan 22 16:41:47 crc kubenswrapper[5032]: I0122 16:41:47.131593 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerStarted","Data":"5f80d55bdfb41933cc2e4624e7d634e4545e39bd4a868fc9cafc60b77f84488e"} Jan 22 16:41:47 crc kubenswrapper[5032]: I0122 16:41:47.265237 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5"] Jan 22 16:41:47 crc kubenswrapper[5032]: W0122 16:41:47.275304 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedd38900_30d9_4f92_9e58_31002222e3ce.slice/crio-1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd WatchSource:0}: Error finding container 1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd: Status 404 returned error can't find the container with id 1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd Jan 22 16:41:48 crc kubenswrapper[5032]: I0122 16:41:48.141717 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" event={"ID":"edd38900-30d9-4f92-9e58-31002222e3ce","Type":"ContainerStarted","Data":"1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd"} Jan 22 16:41:49 crc kubenswrapper[5032]: I0122 16:41:49.154072 5032 generic.go:334] "Generic (PLEG): container finished" podID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerID="c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241" exitCode=0 Jan 22 16:41:49 crc kubenswrapper[5032]: I0122 16:41:49.154354 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerDied","Data":"c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241"} Jan 22 16:41:49 crc kubenswrapper[5032]: I0122 16:41:49.159356 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" event={"ID":"edd38900-30d9-4f92-9e58-31002222e3ce","Type":"ContainerStarted","Data":"ccc1bb5ce88a432bc142f22c22a581e1f315875d399cfe6524c4858b8a5e204e"} Jan 22 16:41:49 crc kubenswrapper[5032]: I0122 16:41:49.198564 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" podStartSLOduration=2.580696324 podStartE2EDuration="3.198516342s" podCreationTimestamp="2026-01-22 16:41:46 +0000 UTC" firstStartedPulling="2026-01-22 16:41:47.278081114 +0000 UTC m=+2211.090250145" lastFinishedPulling="2026-01-22 16:41:47.895901132 +0000 UTC m=+2211.708070163" observedRunningTime="2026-01-22 16:41:49.189619158 +0000 UTC m=+2213.001788189" 
watchObservedRunningTime="2026-01-22 16:41:49.198516342 +0000 UTC m=+2213.010685403" Jan 22 16:41:50 crc kubenswrapper[5032]: I0122 16:41:50.175325 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerStarted","Data":"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583"} Jan 22 16:41:50 crc kubenswrapper[5032]: I0122 16:41:50.208607 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7wcmv" podStartSLOduration=2.803698005 podStartE2EDuration="5.208578053s" podCreationTimestamp="2026-01-22 16:41:45 +0000 UTC" firstStartedPulling="2026-01-22 16:41:47.137883439 +0000 UTC m=+2210.950052510" lastFinishedPulling="2026-01-22 16:41:49.542763487 +0000 UTC m=+2213.354932558" observedRunningTime="2026-01-22 16:41:50.204021083 +0000 UTC m=+2214.016190154" watchObservedRunningTime="2026-01-22 16:41:50.208578053 +0000 UTC m=+2214.020747124" Jan 22 16:41:55 crc kubenswrapper[5032]: I0122 16:41:55.887132 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:55 crc kubenswrapper[5032]: I0122 16:41:55.887772 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:55 crc kubenswrapper[5032]: I0122 16:41:55.962726 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:56 crc kubenswrapper[5032]: I0122 16:41:56.304497 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:56 crc kubenswrapper[5032]: I0122 16:41:56.363756 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.289242 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7wcmv" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="registry-server" containerID="cri-o://3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583" gracePeriod=2 Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.630463 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.633261 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.649916 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.740392 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.770381 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.770745 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.770839 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djk97\" (UniqueName: \"kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.872228 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities\") pod \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.872304 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsjxb\" (UniqueName: \"kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb\") pod \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.872390 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content\") pod \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\" (UID: \"9ceaf6e2-785e-41e0-b218-4c6a92ad5153\") " Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.873303 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities" (OuterVolumeSpecName: "utilities") pod "9ceaf6e2-785e-41e0-b218-4c6a92ad5153" (UID: "9ceaf6e2-785e-41e0-b218-4c6a92ad5153"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.879561 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.879736 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.879785 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djk97\" (UniqueName: \"kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.879842 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.880702 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.880812 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.896518 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djk97\" (UniqueName: \"kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97\") pod \"certified-operators-gz7ds\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.899668 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb" (OuterVolumeSpecName: "kube-api-access-dsjxb") pod "9ceaf6e2-785e-41e0-b218-4c6a92ad5153" (UID: "9ceaf6e2-785e-41e0-b218-4c6a92ad5153"). InnerVolumeSpecName "kube-api-access-dsjxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.930468 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ceaf6e2-785e-41e0-b218-4c6a92ad5153" (UID: "9ceaf6e2-785e-41e0-b218-4c6a92ad5153"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.981263 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsjxb\" (UniqueName: \"kubernetes.io/projected/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-kube-api-access-dsjxb\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:58 crc kubenswrapper[5032]: I0122 16:41:58.981297 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ceaf6e2-785e-41e0-b218-4c6a92ad5153-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.037308 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.299561 5032 generic.go:334] "Generic (PLEG): container finished" podID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerID="3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583" exitCode=0 Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.299738 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerDied","Data":"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583"} Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.300075 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7wcmv" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.300093 5032 scope.go:117] "RemoveContainer" containerID="3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.300073 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7wcmv" event={"ID":"9ceaf6e2-785e-41e0-b218-4c6a92ad5153","Type":"ContainerDied","Data":"5f80d55bdfb41933cc2e4624e7d634e4545e39bd4a868fc9cafc60b77f84488e"} Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.320720 5032 scope.go:117] "RemoveContainer" containerID="c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.337533 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.344586 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7wcmv"] Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.359346 5032 scope.go:117] "RemoveContainer" containerID="c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.373800 5032 scope.go:117] "RemoveContainer" containerID="3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583" Jan 22 16:41:59 crc kubenswrapper[5032]: E0122 16:41:59.374388 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583\": container with ID starting with 3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583 not found: ID does not exist" containerID="3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.374525 5032 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583"} err="failed to get container status \"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583\": rpc error: code = NotFound desc = could not find container \"3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583\": container with ID starting with 3cd29ed9192387a836dc67d9f187a13e4ce071b30ee7a06f3d677ec3cd0ef583 not found: ID does not exist" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.374672 5032 scope.go:117] "RemoveContainer" containerID="c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241" Jan 22 16:41:59 crc kubenswrapper[5032]: E0122 16:41:59.375236 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241\": container with ID starting with c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241 not found: ID does not exist" containerID="c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.375274 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241"} err="failed to get container status \"c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241\": rpc error: code = NotFound desc = could not find container \"c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241\": container with ID starting with c8e5ac1c4e6fe1084871289ad4de66c21206600d2e8d04c88cafbbe1e3ed0241 not found: ID does not exist" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.375299 5032 scope.go:117] "RemoveContainer" containerID="c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7" Jan 22 16:41:59 crc kubenswrapper[5032]: E0122 16:41:59.375544 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7\": container with ID starting with c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7 not found: ID does not exist" containerID="c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.375655 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7"} err="failed to get container status \"c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7\": rpc error: code = NotFound desc = could not find container \"c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7\": container with ID starting with c5fadc392f65b3cba2da980eb7ae15eeb867591058e4509a38048c139a82d6e7 not found: ID does not exist" Jan 22 16:41:59 crc kubenswrapper[5032]: I0122 16:41:59.548652 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:41:59 crc kubenswrapper[5032]: W0122 16:41:59.552908 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b3670b3_2c9c_4d1f_9b85_0eab1601c474.slice/crio-37370d76fb5b3b2307f49d4bb558c27b2c9d940035180dd97445125358e321dd WatchSource:0}: Error finding container 37370d76fb5b3b2307f49d4bb558c27b2c9d940035180dd97445125358e321dd: Status 404 returned error 
can't find the container with id 37370d76fb5b3b2307f49d4bb558c27b2c9d940035180dd97445125358e321dd Jan 22 16:42:00 crc kubenswrapper[5032]: I0122 16:42:00.317557 5032 generic.go:334] "Generic (PLEG): container finished" podID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerID="4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62" exitCode=0 Jan 22 16:42:00 crc kubenswrapper[5032]: I0122 16:42:00.317671 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerDied","Data":"4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62"} Jan 22 16:42:00 crc kubenswrapper[5032]: I0122 16:42:00.317713 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerStarted","Data":"37370d76fb5b3b2307f49d4bb558c27b2c9d940035180dd97445125358e321dd"} Jan 22 16:42:00 crc kubenswrapper[5032]: I0122 16:42:00.470481 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" path="/var/lib/kubelet/pods/9ceaf6e2-785e-41e0-b218-4c6a92ad5153/volumes" Jan 22 16:42:02 crc kubenswrapper[5032]: I0122 16:42:02.340096 5032 generic.go:334] "Generic (PLEG): container finished" podID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerID="bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89" exitCode=0 Jan 22 16:42:02 crc kubenswrapper[5032]: I0122 16:42:02.340159 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerDied","Data":"bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89"} Jan 22 16:42:03 crc kubenswrapper[5032]: I0122 16:42:03.016273 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:42:03 crc kubenswrapper[5032]: I0122 16:42:03.016616 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:42:03 crc kubenswrapper[5032]: I0122 16:42:03.349432 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerStarted","Data":"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941"} Jan 22 16:42:03 crc kubenswrapper[5032]: I0122 16:42:03.374685 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gz7ds" podStartSLOduration=2.8874934789999998 podStartE2EDuration="5.374660635s" podCreationTimestamp="2026-01-22 16:41:58 +0000 UTC" firstStartedPulling="2026-01-22 16:42:00.323310098 +0000 UTC m=+2224.135479169" lastFinishedPulling="2026-01-22 16:42:02.810477294 +0000 UTC m=+2226.622646325" observedRunningTime="2026-01-22 16:42:03.368634217 +0000 UTC m=+2227.180803248" watchObservedRunningTime="2026-01-22 16:42:03.374660635 +0000 UTC m=+2227.186829666" Jan 22 16:42:09 
crc kubenswrapper[5032]: I0122 16:42:09.037461 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:09 crc kubenswrapper[5032]: I0122 16:42:09.038145 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:09 crc kubenswrapper[5032]: I0122 16:42:09.125000 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:09 crc kubenswrapper[5032]: I0122 16:42:09.446411 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:09 crc kubenswrapper[5032]: I0122 16:42:09.495034 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.429135 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gz7ds" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="registry-server" containerID="cri-o://dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941" gracePeriod=2 Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.927743 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.931377 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content\") pod \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.931465 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djk97\" (UniqueName: \"kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97\") pod \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.931515 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities\") pod \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\" (UID: \"6b3670b3-2c9c-4d1f-9b85-0eab1601c474\") " Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.933033 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities" (OuterVolumeSpecName: "utilities") pod "6b3670b3-2c9c-4d1f-9b85-0eab1601c474" (UID: "6b3670b3-2c9c-4d1f-9b85-0eab1601c474"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:42:11 crc kubenswrapper[5032]: I0122 16:42:11.939995 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97" (OuterVolumeSpecName: "kube-api-access-djk97") pod "6b3670b3-2c9c-4d1f-9b85-0eab1601c474" (UID: "6b3670b3-2c9c-4d1f-9b85-0eab1601c474"). InnerVolumeSpecName "kube-api-access-djk97". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.033052 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djk97\" (UniqueName: \"kubernetes.io/projected/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-kube-api-access-djk97\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.033421 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.073590 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b3670b3-2c9c-4d1f-9b85-0eab1601c474" (UID: "6b3670b3-2c9c-4d1f-9b85-0eab1601c474"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.134546 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b3670b3-2c9c-4d1f-9b85-0eab1601c474-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.440461 5032 generic.go:334] "Generic (PLEG): container finished" podID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerID="dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941" exitCode=0 Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.440504 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerDied","Data":"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941"} Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.440531 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gz7ds" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.440542 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gz7ds" event={"ID":"6b3670b3-2c9c-4d1f-9b85-0eab1601c474","Type":"ContainerDied","Data":"37370d76fb5b3b2307f49d4bb558c27b2c9d940035180dd97445125358e321dd"} Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.440563 5032 scope.go:117] "RemoveContainer" containerID="dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.463292 5032 scope.go:117] "RemoveContainer" containerID="bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.478655 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.488095 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gz7ds"] Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.510778 5032 scope.go:117] "RemoveContainer" containerID="4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.546922 5032 scope.go:117] "RemoveContainer" containerID="dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941" Jan 22 16:42:12 crc kubenswrapper[5032]: E0122 16:42:12.547538 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941\": container with ID starting with dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941 not found: ID does not exist" containerID="dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.547579 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941"} err="failed to get container status \"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941\": rpc error: code = NotFound desc = could not find container \"dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941\": container with ID starting with dc738b4e67262f26c47097ec29717d7161f19161745ad0610593992b0d9ef941 not found: ID does not exist" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.547603 5032 scope.go:117] "RemoveContainer" containerID="bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89" Jan 22 16:42:12 crc kubenswrapper[5032]: E0122 16:42:12.548006 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89\": container with ID starting with bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89 not found: ID does not exist" containerID="bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.548038 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89"} err="failed to get container status \"bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89\": rpc error: code = NotFound desc = could not find 
container \"bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89\": container with ID starting with bb5b463e7cfaefcd9827e33e3c7fc1e9b07f434c4ce2219290105d315a3c8a89 not found: ID does not exist" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.548056 5032 scope.go:117] "RemoveContainer" containerID="4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62" Jan 22 16:42:12 crc kubenswrapper[5032]: E0122 16:42:12.548379 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62\": container with ID starting with 4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62 not found: ID does not exist" containerID="4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62" Jan 22 16:42:12 crc kubenswrapper[5032]: I0122 16:42:12.548422 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62"} err="failed to get container status \"4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62\": rpc error: code = NotFound desc = could not find container \"4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62\": container with ID starting with 4e3a1f581ba8db20ae2404a6dcba8d7d1042a104111292693f0070632dda7e62 not found: ID does not exist" Jan 22 16:42:14 crc kubenswrapper[5032]: I0122 16:42:14.485577 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" path="/var/lib/kubelet/pods/6b3670b3-2c9c-4d1f-9b85-0eab1601c474/volumes" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.016982 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.017601 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.017663 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.018608 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.018674 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" gracePeriod=600 Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.150565 5032 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230106 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230480 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="extract-utilities" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230496 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="extract-utilities" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230511 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230517 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230529 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230535 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230553 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="extract-content" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230559 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="extract-content" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230572 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="extract-utilities" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230578 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="extract-utilities" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.230601 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="extract-content" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230607 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="extract-content" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230767 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ceaf6e2-785e-41e0-b218-4c6a92ad5153" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.230785 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b3670b3-2c9c-4d1f-9b85-0eab1601c474" containerName="registry-server" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.232080 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.259449 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.303730 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7btpr\" (UniqueName: \"kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.303817 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.303983 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.406062 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7btpr\" (UniqueName: \"kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.406132 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.406225 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.406892 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.406911 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.458160 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7btpr\" (UniqueName: \"kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr\") pod \"redhat-marketplace-djfpc\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.549411 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.682805 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" exitCode=0 Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.683089 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709"} Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.683125 5032 scope.go:117] "RemoveContainer" containerID="e9f7a0f239afe7188bd3aa6ac00d194c903af47fec7e24da32038be8da79eccd" Jan 22 16:42:33 crc kubenswrapper[5032]: I0122 16:42:33.683743 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:42:33 crc kubenswrapper[5032]: E0122 16:42:33.684036 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:42:34 crc kubenswrapper[5032]: I0122 16:42:34.089656 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:34 crc kubenswrapper[5032]: I0122 16:42:34.690903 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerID="af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861" exitCode=0 Jan 22 16:42:34 crc kubenswrapper[5032]: I0122 16:42:34.691250 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerDied","Data":"af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861"} Jan 22 16:42:34 crc kubenswrapper[5032]: I0122 16:42:34.691274 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerStarted","Data":"4a51408abfee916893a1923e29728276b0efd3ee0e78362bd6397bfb58128b01"} Jan 22 16:42:35 crc kubenswrapper[5032]: I0122 16:42:35.707236 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerStarted","Data":"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f"} Jan 22 16:42:36 crc kubenswrapper[5032]: I0122 16:42:36.721553 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" 
containerID="16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f" exitCode=0 Jan 22 16:42:36 crc kubenswrapper[5032]: I0122 16:42:36.721645 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerDied","Data":"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f"} Jan 22 16:42:37 crc kubenswrapper[5032]: I0122 16:42:37.735677 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerStarted","Data":"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49"} Jan 22 16:42:37 crc kubenswrapper[5032]: I0122 16:42:37.757197 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-djfpc" podStartSLOduration=2.299829511 podStartE2EDuration="4.757175845s" podCreationTimestamp="2026-01-22 16:42:33 +0000 UTC" firstStartedPulling="2026-01-22 16:42:34.692599451 +0000 UTC m=+2258.504768482" lastFinishedPulling="2026-01-22 16:42:37.149945775 +0000 UTC m=+2260.962114816" observedRunningTime="2026-01-22 16:42:37.754268449 +0000 UTC m=+2261.566437490" watchObservedRunningTime="2026-01-22 16:42:37.757175845 +0000 UTC m=+2261.569344876" Jan 22 16:42:43 crc kubenswrapper[5032]: I0122 16:42:43.550478 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:43 crc kubenswrapper[5032]: I0122 16:42:43.551041 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:43 crc kubenswrapper[5032]: I0122 16:42:43.600360 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:43 crc kubenswrapper[5032]: I0122 16:42:43.849352 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:43 crc kubenswrapper[5032]: I0122 16:42:43.900512 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:45 crc kubenswrapper[5032]: I0122 16:42:45.456062 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:42:45 crc kubenswrapper[5032]: E0122 16:42:45.456374 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:42:45 crc kubenswrapper[5032]: I0122 16:42:45.818570 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-djfpc" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="registry-server" containerID="cri-o://d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49" gracePeriod=2 Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.319319 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.383827 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content\") pod \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.383996 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities\") pod \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.384029 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7btpr\" (UniqueName: \"kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr\") pod \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\" (UID: \"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1\") " Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.385067 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities" (OuterVolumeSpecName: "utilities") pod "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" (UID: "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.385575 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.389120 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr" (OuterVolumeSpecName: "kube-api-access-7btpr") pod "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" (UID: "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1"). InnerVolumeSpecName "kube-api-access-7btpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.414110 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" (UID: "8ca33e04-a809-4f2c-92b2-579aaf8fe2d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.488037 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.488075 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7btpr\" (UniqueName: \"kubernetes.io/projected/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1-kube-api-access-7btpr\") on node \"crc\" DevicePath \"\"" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.835030 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerID="d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49" exitCode=0 Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.835082 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerDied","Data":"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49"} Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.835156 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djfpc" event={"ID":"8ca33e04-a809-4f2c-92b2-579aaf8fe2d1","Type":"ContainerDied","Data":"4a51408abfee916893a1923e29728276b0efd3ee0e78362bd6397bfb58128b01"} Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.835156 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djfpc" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.835218 5032 scope.go:117] "RemoveContainer" containerID="d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.883344 5032 scope.go:117] "RemoveContainer" containerID="16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.886912 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.899924 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-djfpc"] Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.921274 5032 scope.go:117] "RemoveContainer" containerID="af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.972622 5032 scope.go:117] "RemoveContainer" containerID="d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49" Jan 22 16:42:46 crc kubenswrapper[5032]: E0122 16:42:46.973454 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49\": container with ID starting with d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49 not found: ID does not exist" containerID="d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.973511 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49"} err="failed to get container status 
\"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49\": rpc error: code = NotFound desc = could not find container \"d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49\": container with ID starting with d19885528ad56a500a51e069f014e9f5fb1f48ab9248641ddfefaa148a25be49 not found: ID does not exist" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.973547 5032 scope.go:117] "RemoveContainer" containerID="16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f" Jan 22 16:42:46 crc kubenswrapper[5032]: E0122 16:42:46.974132 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f\": container with ID starting with 16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f not found: ID does not exist" containerID="16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.974199 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f"} err="failed to get container status \"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f\": rpc error: code = NotFound desc = could not find container \"16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f\": container with ID starting with 16fe5d6dd84e4e53778ba351e7d886bfc4f6aad2844d9b111e176e53b17ccf0f not found: ID does not exist" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.974240 5032 scope.go:117] "RemoveContainer" containerID="af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861" Jan 22 16:42:46 crc kubenswrapper[5032]: E0122 16:42:46.974805 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861\": container with ID starting with af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861 not found: ID does not exist" containerID="af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861" Jan 22 16:42:46 crc kubenswrapper[5032]: I0122 16:42:46.974976 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861"} err="failed to get container status \"af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861\": rpc error: code = NotFound desc = could not find container \"af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861\": container with ID starting with af9fa14de2a1807e9ea882797ec037c20bc59aa98dcc1931f84ff35697c63861 not found: ID does not exist" Jan 22 16:42:48 crc kubenswrapper[5032]: I0122 16:42:48.470818 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" path="/var/lib/kubelet/pods/8ca33e04-a809-4f2c-92b2-579aaf8fe2d1/volumes" Jan 22 16:42:56 crc kubenswrapper[5032]: I0122 16:42:56.462255 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:42:56 crc kubenswrapper[5032]: E0122 16:42:56.463181 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:43:08 crc kubenswrapper[5032]: I0122 16:43:08.454750 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:43:08 crc kubenswrapper[5032]: E0122 16:43:08.455537 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:43:23 crc kubenswrapper[5032]: I0122 16:43:23.455154 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:43:23 crc kubenswrapper[5032]: E0122 16:43:23.456027 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:43:37 crc kubenswrapper[5032]: I0122 16:43:37.455787 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:43:37 crc kubenswrapper[5032]: E0122 16:43:37.456541 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:43:50 crc kubenswrapper[5032]: I0122 16:43:50.455783 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:43:50 crc kubenswrapper[5032]: E0122 16:43:50.456981 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:44:02 crc kubenswrapper[5032]: I0122 16:44:02.455573 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:44:02 crc kubenswrapper[5032]: E0122 16:44:02.456683 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:44:17 crc kubenswrapper[5032]: I0122 16:44:17.454693 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:44:17 crc kubenswrapper[5032]: E0122 16:44:17.455735 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:44:28 crc kubenswrapper[5032]: I0122 16:44:28.455035 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:44:28 crc kubenswrapper[5032]: E0122 16:44:28.456570 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.193820 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nfp7l"] Jan 22 16:44:32 crc kubenswrapper[5032]: E0122 16:44:32.194563 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="registry-server" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.194578 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="registry-server" Jan 22 16:44:32 crc kubenswrapper[5032]: E0122 16:44:32.194590 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="extract-content" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.194598 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="extract-content" Jan 22 16:44:32 crc kubenswrapper[5032]: E0122 16:44:32.194621 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="extract-utilities" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.194629 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="extract-utilities" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.194895 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca33e04-a809-4f2c-92b2-579aaf8fe2d1" containerName="registry-server" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.196765 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.217335 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nfp7l"] Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.282468 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-utilities\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.282526 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-catalog-content\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.282687 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkspl\" (UniqueName: \"kubernetes.io/projected/32f503dc-221c-4920-a207-f3651dd990df-kube-api-access-xkspl\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.384876 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-utilities\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.384955 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-catalog-content\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.385044 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkspl\" (UniqueName: \"kubernetes.io/projected/32f503dc-221c-4920-a207-f3651dd990df-kube-api-access-xkspl\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.385760 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-utilities\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.385778 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32f503dc-221c-4920-a207-f3651dd990df-catalog-content\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.412410 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xkspl\" (UniqueName: \"kubernetes.io/projected/32f503dc-221c-4920-a207-f3651dd990df-kube-api-access-xkspl\") pod \"redhat-operators-nfp7l\" (UID: \"32f503dc-221c-4920-a207-f3651dd990df\") " pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:32 crc kubenswrapper[5032]: I0122 16:44:32.537764 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:33 crc kubenswrapper[5032]: I0122 16:44:33.016578 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nfp7l"] Jan 22 16:44:33 crc kubenswrapper[5032]: I0122 16:44:33.961906 5032 generic.go:334] "Generic (PLEG): container finished" podID="32f503dc-221c-4920-a207-f3651dd990df" containerID="bdcd142b905ff28938c7c0088614e26fd33446e75539b65edb58a09be345abda" exitCode=0 Jan 22 16:44:33 crc kubenswrapper[5032]: I0122 16:44:33.961974 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfp7l" event={"ID":"32f503dc-221c-4920-a207-f3651dd990df","Type":"ContainerDied","Data":"bdcd142b905ff28938c7c0088614e26fd33446e75539b65edb58a09be345abda"} Jan 22 16:44:33 crc kubenswrapper[5032]: I0122 16:44:33.962627 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfp7l" event={"ID":"32f503dc-221c-4920-a207-f3651dd990df","Type":"ContainerStarted","Data":"2943137393e322affbfa23be48e67f30a657a964473ca6c61165a2667b42081e"} Jan 22 16:44:33 crc kubenswrapper[5032]: I0122 16:44:33.966500 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:44:42 crc kubenswrapper[5032]: I0122 16:44:42.454751 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:44:42 crc kubenswrapper[5032]: E0122 16:44:42.456461 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:44:43 crc kubenswrapper[5032]: I0122 16:44:43.058746 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfp7l" event={"ID":"32f503dc-221c-4920-a207-f3651dd990df","Type":"ContainerStarted","Data":"9eebe7a4c86c158a72b626735e4132caab66acc8dbc17ff87ca93d4263c66283"} Jan 22 16:44:46 crc kubenswrapper[5032]: I0122 16:44:46.091069 5032 generic.go:334] "Generic (PLEG): container finished" podID="32f503dc-221c-4920-a207-f3651dd990df" containerID="9eebe7a4c86c158a72b626735e4132caab66acc8dbc17ff87ca93d4263c66283" exitCode=0 Jan 22 16:44:46 crc kubenswrapper[5032]: I0122 16:44:46.091163 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfp7l" event={"ID":"32f503dc-221c-4920-a207-f3651dd990df","Type":"ContainerDied","Data":"9eebe7a4c86c158a72b626735e4132caab66acc8dbc17ff87ca93d4263c66283"} Jan 22 16:44:49 crc kubenswrapper[5032]: I0122 16:44:49.128678 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfp7l" 
event={"ID":"32f503dc-221c-4920-a207-f3651dd990df","Type":"ContainerStarted","Data":"c6ddf175bde8a977034db0f57308e6d1f38c43d954f233919cf7f04b7a579502"} Jan 22 16:44:49 crc kubenswrapper[5032]: I0122 16:44:49.162186 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nfp7l" podStartSLOduration=2.582222165 podStartE2EDuration="17.162161145s" podCreationTimestamp="2026-01-22 16:44:32 +0000 UTC" firstStartedPulling="2026-01-22 16:44:33.966284247 +0000 UTC m=+2377.778453278" lastFinishedPulling="2026-01-22 16:44:48.546223237 +0000 UTC m=+2392.358392258" observedRunningTime="2026-01-22 16:44:49.153566559 +0000 UTC m=+2392.965735600" watchObservedRunningTime="2026-01-22 16:44:49.162161145 +0000 UTC m=+2392.974330216" Jan 22 16:44:52 crc kubenswrapper[5032]: I0122 16:44:52.538640 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:52 crc kubenswrapper[5032]: I0122 16:44:52.539289 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:44:53 crc kubenswrapper[5032]: I0122 16:44:53.615042 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nfp7l" podUID="32f503dc-221c-4920-a207-f3651dd990df" containerName="registry-server" probeResult="failure" output=< Jan 22 16:44:53 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:44:53 crc kubenswrapper[5032]: > Jan 22 16:44:54 crc kubenswrapper[5032]: I0122 16:44:54.455364 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:44:54 crc kubenswrapper[5032]: E0122 16:44:54.455781 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.174941 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km"] Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.178077 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.181051 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.182599 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.184529 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km"] Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.346267 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.346651 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.346788 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrbth\" (UniqueName: \"kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.448218 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.448302 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrbth\" (UniqueName: \"kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.448403 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.449312 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume\") pod 
\"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.457967 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.474348 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrbth\" (UniqueName: \"kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth\") pod \"collect-profiles-29485005-rl2km\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:00 crc kubenswrapper[5032]: I0122 16:45:00.515142 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:01 crc kubenswrapper[5032]: I0122 16:45:01.018963 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km"] Jan 22 16:45:01 crc kubenswrapper[5032]: I0122 16:45:01.255114 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" event={"ID":"47319913-434f-4ed0-8c34-9ce2385770b6","Type":"ContainerStarted","Data":"73b09e38a774f1e7384569368f65c504d0e2d248978bdeb20a753708f6b26c74"} Jan 22 16:45:01 crc kubenswrapper[5032]: I0122 16:45:01.255167 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" event={"ID":"47319913-434f-4ed0-8c34-9ce2385770b6","Type":"ContainerStarted","Data":"f6adc8d5fa927cbd46235cac58ea805031dca248d6870ce9141dd6c7da7563c4"} Jan 22 16:45:01 crc kubenswrapper[5032]: I0122 16:45:01.277937 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" podStartSLOduration=1.277919912 podStartE2EDuration="1.277919912s" podCreationTimestamp="2026-01-22 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 16:45:01.275306403 +0000 UTC m=+2405.087475434" watchObservedRunningTime="2026-01-22 16:45:01.277919912 +0000 UTC m=+2405.090088953" Jan 22 16:45:02 crc kubenswrapper[5032]: I0122 16:45:02.316140 5032 generic.go:334] "Generic (PLEG): container finished" podID="47319913-434f-4ed0-8c34-9ce2385770b6" containerID="73b09e38a774f1e7384569368f65c504d0e2d248978bdeb20a753708f6b26c74" exitCode=0 Jan 22 16:45:02 crc kubenswrapper[5032]: I0122 16:45:02.316677 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" event={"ID":"47319913-434f-4ed0-8c34-9ce2385770b6","Type":"ContainerDied","Data":"73b09e38a774f1e7384569368f65c504d0e2d248978bdeb20a753708f6b26c74"} Jan 22 16:45:02 crc kubenswrapper[5032]: I0122 16:45:02.587689 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:45:02 crc kubenswrapper[5032]: I0122 16:45:02.636063 5032 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nfp7l" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.207560 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nfp7l"] Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.384583 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.385197 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wqp6k" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="registry-server" containerID="cri-o://8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d" gracePeriod=2 Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.760287 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.818777 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume\") pod \"47319913-434f-4ed0-8c34-9ce2385770b6\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.819157 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume\") pod \"47319913-434f-4ed0-8c34-9ce2385770b6\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.819277 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrbth\" (UniqueName: \"kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth\") pod \"47319913-434f-4ed0-8c34-9ce2385770b6\" (UID: \"47319913-434f-4ed0-8c34-9ce2385770b6\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.821212 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume" (OuterVolumeSpecName: "config-volume") pod "47319913-434f-4ed0-8c34-9ce2385770b6" (UID: "47319913-434f-4ed0-8c34-9ce2385770b6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.826278 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth" (OuterVolumeSpecName: "kube-api-access-nrbth") pod "47319913-434f-4ed0-8c34-9ce2385770b6" (UID: "47319913-434f-4ed0-8c34-9ce2385770b6"). InnerVolumeSpecName "kube-api-access-nrbth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.826690 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "47319913-434f-4ed0-8c34-9ce2385770b6" (UID: "47319913-434f-4ed0-8c34-9ce2385770b6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.883696 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.922104 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content\") pod \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.922352 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities\") pod \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.922461 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-288wv\" (UniqueName: \"kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv\") pod \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\" (UID: \"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385\") " Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.922932 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/47319913-434f-4ed0-8c34-9ce2385770b6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.923010 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/47319913-434f-4ed0-8c34-9ce2385770b6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.923071 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrbth\" (UniqueName: \"kubernetes.io/projected/47319913-434f-4ed0-8c34-9ce2385770b6-kube-api-access-nrbth\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.923682 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities" (OuterVolumeSpecName: "utilities") pod "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" (UID: "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:45:03 crc kubenswrapper[5032]: I0122 16:45:03.926299 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv" (OuterVolumeSpecName: "kube-api-access-288wv") pod "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" (UID: "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385"). InnerVolumeSpecName "kube-api-access-288wv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.026584 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-288wv\" (UniqueName: \"kubernetes.io/projected/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-kube-api-access-288wv\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.026638 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.034733 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" (UID: "b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.128790 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.337291 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" event={"ID":"47319913-434f-4ed0-8c34-9ce2385770b6","Type":"ContainerDied","Data":"f6adc8d5fa927cbd46235cac58ea805031dca248d6870ce9141dd6c7da7563c4"} Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.337336 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6adc8d5fa927cbd46235cac58ea805031dca248d6870ce9141dd6c7da7563c4" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.337394 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.345636 5032 generic.go:334] "Generic (PLEG): container finished" podID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerID="8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d" exitCode=0 Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.345680 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerDied","Data":"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d"} Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.345732 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wqp6k" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.345751 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqp6k" event={"ID":"b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385","Type":"ContainerDied","Data":"6ef17ab82e64564e0abab261d587eeab3903e57cb408578c77b5234564ae7230"} Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.345775 5032 scope.go:117] "RemoveContainer" containerID="8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.352444 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5"] Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.363058 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484960-qpvc5"] Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.375231 5032 scope.go:117] "RemoveContainer" containerID="5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.391874 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.396908 5032 scope.go:117] "RemoveContainer" containerID="b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.400072 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wqp6k"] Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.414681 5032 scope.go:117] "RemoveContainer" containerID="8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d" Jan 22 16:45:04 crc kubenswrapper[5032]: E0122 16:45:04.416145 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d\": container with ID starting with 8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d not found: ID does not exist" containerID="8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.416199 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d"} err="failed to get container status \"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d\": rpc error: code = NotFound desc = could not find container \"8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d\": container with ID starting with 8ec3a31365d0357b8694b38a9f01db1fa31836a44eae2fa217790e7ea480484d not found: ID does not exist" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.416232 5032 scope.go:117] "RemoveContainer" containerID="5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f" Jan 22 16:45:04 crc kubenswrapper[5032]: E0122 16:45:04.416780 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f\": container with ID starting with 5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f not found: ID does not exist" containerID="5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f" Jan 22 
16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.416834 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f"} err="failed to get container status \"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f\": rpc error: code = NotFound desc = could not find container \"5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f\": container with ID starting with 5fdaea5cadbeea4d5b618bf058e8a58beaf6344dfed03be5fa349b94e2665a6f not found: ID does not exist" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.416882 5032 scope.go:117] "RemoveContainer" containerID="b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759" Jan 22 16:45:04 crc kubenswrapper[5032]: E0122 16:45:04.417302 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759\": container with ID starting with b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759 not found: ID does not exist" containerID="b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.417367 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759"} err="failed to get container status \"b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759\": rpc error: code = NotFound desc = could not find container \"b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759\": container with ID starting with b66905ea980bc8f5c36bac46287825360b0c25f976bfabde202e84b2b290f759 not found: ID does not exist" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.466101 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e14d069-8a95-4e40-b130-f0c65506b423" path="/var/lib/kubelet/pods/1e14d069-8a95-4e40-b130-f0c65506b423/volumes" Jan 22 16:45:04 crc kubenswrapper[5032]: I0122 16:45:04.467132 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" path="/var/lib/kubelet/pods/b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385/volumes" Jan 22 16:45:05 crc kubenswrapper[5032]: I0122 16:45:05.454368 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:45:05 crc kubenswrapper[5032]: E0122 16:45:05.454977 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:45:18 crc kubenswrapper[5032]: I0122 16:45:18.454686 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:45:18 crc kubenswrapper[5032]: E0122 16:45:18.455899 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:45:19 crc kubenswrapper[5032]: I0122 16:45:19.707723 5032 scope.go:117] "RemoveContainer" containerID="8d0f81a8aa54f7c86ea0f7f31dd16b2d91ad14f313018c0990a5ffcce9ddf421" Jan 22 16:45:29 crc kubenswrapper[5032]: I0122 16:45:29.455511 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:45:29 crc kubenswrapper[5032]: E0122 16:45:29.456406 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:45:40 crc kubenswrapper[5032]: I0122 16:45:40.455014 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:45:40 crc kubenswrapper[5032]: E0122 16:45:40.457809 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:45:55 crc kubenswrapper[5032]: I0122 16:45:55.454821 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:45:55 crc kubenswrapper[5032]: E0122 16:45:55.456124 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:46:10 crc kubenswrapper[5032]: I0122 16:46:10.454778 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:46:10 crc kubenswrapper[5032]: E0122 16:46:10.456058 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:46:19 crc kubenswrapper[5032]: I0122 16:46:19.180105 5032 generic.go:334] "Generic (PLEG): container finished" podID="edd38900-30d9-4f92-9e58-31002222e3ce" containerID="ccc1bb5ce88a432bc142f22c22a581e1f315875d399cfe6524c4858b8a5e204e" exitCode=0 Jan 22 16:46:19 crc kubenswrapper[5032]: I0122 16:46:19.180188 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" 
event={"ID":"edd38900-30d9-4f92-9e58-31002222e3ce","Type":"ContainerDied","Data":"ccc1bb5ce88a432bc142f22c22a581e1f315875d399cfe6524c4858b8a5e204e"} Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.616220 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.746265 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle\") pod \"edd38900-30d9-4f92-9e58-31002222e3ce\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.746349 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f28qr\" (UniqueName: \"kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr\") pod \"edd38900-30d9-4f92-9e58-31002222e3ce\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.746412 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory\") pod \"edd38900-30d9-4f92-9e58-31002222e3ce\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.746461 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam\") pod \"edd38900-30d9-4f92-9e58-31002222e3ce\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.746567 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0\") pod \"edd38900-30d9-4f92-9e58-31002222e3ce\" (UID: \"edd38900-30d9-4f92-9e58-31002222e3ce\") " Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.753278 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr" (OuterVolumeSpecName: "kube-api-access-f28qr") pod "edd38900-30d9-4f92-9e58-31002222e3ce" (UID: "edd38900-30d9-4f92-9e58-31002222e3ce"). InnerVolumeSpecName "kube-api-access-f28qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.754029 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "edd38900-30d9-4f92-9e58-31002222e3ce" (UID: "edd38900-30d9-4f92-9e58-31002222e3ce"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.782667 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "edd38900-30d9-4f92-9e58-31002222e3ce" (UID: "edd38900-30d9-4f92-9e58-31002222e3ce"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.784997 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory" (OuterVolumeSpecName: "inventory") pod "edd38900-30d9-4f92-9e58-31002222e3ce" (UID: "edd38900-30d9-4f92-9e58-31002222e3ce"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.806091 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "edd38900-30d9-4f92-9e58-31002222e3ce" (UID: "edd38900-30d9-4f92-9e58-31002222e3ce"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.849218 5032 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.849250 5032 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.849261 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f28qr\" (UniqueName: \"kubernetes.io/projected/edd38900-30d9-4f92-9e58-31002222e3ce-kube-api-access-f28qr\") on node \"crc\" DevicePath \"\"" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.849272 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:46:20 crc kubenswrapper[5032]: I0122 16:46:20.849280 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/edd38900-30d9-4f92-9e58-31002222e3ce-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.218052 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" event={"ID":"edd38900-30d9-4f92-9e58-31002222e3ce","Type":"ContainerDied","Data":"1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd"} Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.218552 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1055ebcb1f0a47c3827730ed22fc7dd0bd44f953022ba82697d1d2fa1e657cfd" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.218169 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.304374 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw"] Jan 22 16:46:21 crc kubenswrapper[5032]: E0122 16:46:21.304983 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="registry-server" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305059 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="registry-server" Jan 22 16:46:21 crc kubenswrapper[5032]: E0122 16:46:21.305128 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edd38900-30d9-4f92-9e58-31002222e3ce" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305182 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="edd38900-30d9-4f92-9e58-31002222e3ce" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 16:46:21 crc kubenswrapper[5032]: E0122 16:46:21.305251 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="extract-utilities" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305332 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="extract-utilities" Jan 22 16:46:21 crc kubenswrapper[5032]: E0122 16:46:21.305402 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47319913-434f-4ed0-8c34-9ce2385770b6" containerName="collect-profiles" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305458 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="47319913-434f-4ed0-8c34-9ce2385770b6" containerName="collect-profiles" Jan 22 16:46:21 crc kubenswrapper[5032]: E0122 16:46:21.305522 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="extract-content" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305577 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="extract-content" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305792 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="edd38900-30d9-4f92-9e58-31002222e3ce" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305881 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="47319913-434f-4ed0-8c34-9ce2385770b6" containerName="collect-profiles" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.305954 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5b5d9ec-ecdf-4afe-b75d-2b29f32e0385" containerName="registry-server" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.306578 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.308174 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.311198 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.311638 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.312101 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.312194 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.312338 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.312501 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.325624 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw"] Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.359904 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pvpb\" (UniqueName: \"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.359961 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.359978 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360002 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360034 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360081 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360099 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360118 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.360133 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.461812 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pvpb\" (UniqueName: \"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.461891 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.461917 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.461943 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.461978 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.462032 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.462051 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.462071 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.462087 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.463595 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.467418 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.467609 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.467950 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.468271 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.468643 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.469089 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.469699 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.483093 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pvpb\" (UniqueName: \"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb\") pod \"nova-edpm-deployment-openstack-edpm-ipam-84krw\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:21 crc kubenswrapper[5032]: I0122 16:46:21.669386 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:46:22 crc kubenswrapper[5032]: I0122 16:46:22.067336 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw"] Jan 22 16:46:22 crc kubenswrapper[5032]: I0122 16:46:22.235296 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" event={"ID":"4b0e9abf-570b-428d-8195-a848d6297da4","Type":"ContainerStarted","Data":"9fa2a36579e5682116a418866f8bce399cd2ed34b361836b0d1801420909256d"} Jan 22 16:46:22 crc kubenswrapper[5032]: I0122 16:46:22.457895 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:46:22 crc kubenswrapper[5032]: E0122 16:46:22.458364 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:46:23 crc kubenswrapper[5032]: I0122 16:46:23.251413 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" event={"ID":"4b0e9abf-570b-428d-8195-a848d6297da4","Type":"ContainerStarted","Data":"8b233bad0e6b5df71c78ad12ef9e8e3128a35e859dc37e353e251524e60ff0ef"} Jan 22 16:46:23 crc kubenswrapper[5032]: I0122 16:46:23.300409 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" podStartSLOduration=1.842538313 podStartE2EDuration="2.300380461s" podCreationTimestamp="2026-01-22 16:46:21 +0000 UTC" firstStartedPulling="2026-01-22 16:46:22.071968777 +0000 UTC m=+2485.884137818" lastFinishedPulling="2026-01-22 16:46:22.529810915 +0000 UTC m=+2486.341979966" observedRunningTime="2026-01-22 16:46:23.2786203 +0000 UTC m=+2487.090789341" watchObservedRunningTime="2026-01-22 16:46:23.300380461 +0000 UTC m=+2487.112549522" Jan 22 16:46:37 crc kubenswrapper[5032]: I0122 16:46:37.455381 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:46:37 crc kubenswrapper[5032]: E0122 16:46:37.456100 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:46:48 crc kubenswrapper[5032]: I0122 16:46:48.455292 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:46:48 crc kubenswrapper[5032]: E0122 16:46:48.455968 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:46:59 crc kubenswrapper[5032]: I0122 16:46:59.455102 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:46:59 crc kubenswrapper[5032]: E0122 16:46:59.457165 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:47:11 crc kubenswrapper[5032]: I0122 16:47:11.455726 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:47:11 crc kubenswrapper[5032]: E0122 16:47:11.456718 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:47:26 crc kubenswrapper[5032]: I0122 16:47:26.459829 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:47:26 crc kubenswrapper[5032]: E0122 16:47:26.460521 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:47:41 crc kubenswrapper[5032]: I0122 16:47:41.454949 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:47:42 crc kubenswrapper[5032]: I0122 16:47:42.121758 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24"} Jan 22 16:48:51 crc kubenswrapper[5032]: I0122 16:48:51.904383 5032 generic.go:334] "Generic (PLEG): container finished" podID="4b0e9abf-570b-428d-8195-a848d6297da4" containerID="8b233bad0e6b5df71c78ad12ef9e8e3128a35e859dc37e353e251524e60ff0ef" exitCode=0 Jan 22 16:48:51 crc kubenswrapper[5032]: I0122 16:48:51.905124 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" event={"ID":"4b0e9abf-570b-428d-8195-a848d6297da4","Type":"ContainerDied","Data":"8b233bad0e6b5df71c78ad12ef9e8e3128a35e859dc37e353e251524e60ff0ef"} Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.354976 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515412 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pvpb\" (UniqueName: \"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515507 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515560 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515592 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515706 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515748 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515799 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515822 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.515875 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory\") pod \"4b0e9abf-570b-428d-8195-a848d6297da4\" (UID: \"4b0e9abf-570b-428d-8195-a848d6297da4\") " Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.536996 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb" (OuterVolumeSpecName: "kube-api-access-5pvpb") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "kube-api-access-5pvpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.540733 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.541819 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.543125 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.550399 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.554123 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.556083 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.559734 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.568046 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory" (OuterVolumeSpecName: "inventory") pod "4b0e9abf-570b-428d-8195-a848d6297da4" (UID: "4b0e9abf-570b-428d-8195-a848d6297da4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618270 5032 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618306 5032 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618315 5032 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618324 5032 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618335 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618344 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pvpb\" (UniqueName: \"kubernetes.io/projected/4b0e9abf-570b-428d-8195-a848d6297da4-kube-api-access-5pvpb\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618352 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618361 5032 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4b0e9abf-570b-428d-8195-a848d6297da4-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.618370 5032 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b0e9abf-570b-428d-8195-a848d6297da4-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.937444 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" event={"ID":"4b0e9abf-570b-428d-8195-a848d6297da4","Type":"ContainerDied","Data":"9fa2a36579e5682116a418866f8bce399cd2ed34b361836b0d1801420909256d"} Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.937497 5032 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9fa2a36579e5682116a418866f8bce399cd2ed34b361836b0d1801420909256d" Jan 22 16:48:53 crc kubenswrapper[5032]: I0122 16:48:53.937528 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-84krw" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.053515 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4"] Jan 22 16:48:54 crc kubenswrapper[5032]: E0122 16:48:54.053882 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b0e9abf-570b-428d-8195-a848d6297da4" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.053899 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b0e9abf-570b-428d-8195-a848d6297da4" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.054075 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b0e9abf-570b-428d-8195-a848d6297da4" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.054643 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.056664 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.057812 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.058087 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.058221 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.059188 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2q86t" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.079658 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4"] Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232083 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232131 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232336 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232375 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232406 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz92t\" (UniqueName: \"kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232527 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.232625 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.333977 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334028 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334115 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334147 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334171 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz92t\" (UniqueName: \"kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334227 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.334284 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.339613 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.340043 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.340100 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.342691 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.350280 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.351078 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.359068 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz92t\" (UniqueName: \"kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.376927 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:48:54 crc kubenswrapper[5032]: I0122 16:48:54.949525 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4"] Jan 22 16:48:54 crc kubenswrapper[5032]: W0122 16:48:54.954327 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc477587_ce40_4773_b6a3_3baefaff6b24.slice/crio-83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5 WatchSource:0}: Error finding container 83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5: Status 404 returned error can't find the container with id 83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5 Jan 22 16:48:55 crc kubenswrapper[5032]: I0122 16:48:55.956748 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" event={"ID":"fc477587-ce40-4773-b6a3-3baefaff6b24","Type":"ContainerStarted","Data":"661d9500077101bd3f77d7ca994735e4c40d45aa70951d47ad6b5b7c70cfff9d"} Jan 22 16:48:55 crc kubenswrapper[5032]: I0122 16:48:55.957359 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" event={"ID":"fc477587-ce40-4773-b6a3-3baefaff6b24","Type":"ContainerStarted","Data":"83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5"} Jan 22 16:48:55 crc kubenswrapper[5032]: I0122 16:48:55.982052 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" podStartSLOduration=1.535605815 podStartE2EDuration="1.982034648s" podCreationTimestamp="2026-01-22 16:48:54 +0000 UTC" firstStartedPulling="2026-01-22 16:48:54.956975007 +0000 UTC m=+2638.769144038" lastFinishedPulling="2026-01-22 16:48:55.40340383 
+0000 UTC m=+2639.215572871" observedRunningTime="2026-01-22 16:48:55.97083891 +0000 UTC m=+2639.783007961" watchObservedRunningTime="2026-01-22 16:48:55.982034648 +0000 UTC m=+2639.794203679" Jan 22 16:50:03 crc kubenswrapper[5032]: I0122 16:50:03.016624 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:50:03 crc kubenswrapper[5032]: I0122 16:50:03.017453 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:50:33 crc kubenswrapper[5032]: I0122 16:50:33.016847 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:50:33 crc kubenswrapper[5032]: I0122 16:50:33.017691 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.017054 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.017583 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.017647 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.018657 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.018737 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24" gracePeriod=600 Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.391441 5032 generic.go:334] 
"Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24" exitCode=0 Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.391512 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24"} Jan 22 16:51:03 crc kubenswrapper[5032]: I0122 16:51:03.391916 5032 scope.go:117] "RemoveContainer" containerID="853d8ccb3601fc23462a80a3017c9cd80b9599041a6d5f94c5291985302ff709" Jan 22 16:51:04 crc kubenswrapper[5032]: I0122 16:51:04.400927 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a"} Jan 22 16:51:38 crc kubenswrapper[5032]: I0122 16:51:38.752942 5032 generic.go:334] "Generic (PLEG): container finished" podID="fc477587-ce40-4773-b6a3-3baefaff6b24" containerID="661d9500077101bd3f77d7ca994735e4c40d45aa70951d47ad6b5b7c70cfff9d" exitCode=0 Jan 22 16:51:38 crc kubenswrapper[5032]: I0122 16:51:38.753053 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" event={"ID":"fc477587-ce40-4773-b6a3-3baefaff6b24","Type":"ContainerDied","Data":"661d9500077101bd3f77d7ca994735e4c40d45aa70951d47ad6b5b7c70cfff9d"} Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.319378 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.465966 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466131 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466184 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466222 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466268 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466333 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.466373 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz92t\" (UniqueName: \"kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t\") pod \"fc477587-ce40-4773-b6a3-3baefaff6b24\" (UID: \"fc477587-ce40-4773-b6a3-3baefaff6b24\") " Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.472817 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t" (OuterVolumeSpecName: "kube-api-access-vz92t") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "kube-api-access-vz92t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.477736 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.495133 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.496055 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory" (OuterVolumeSpecName: "inventory") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.501644 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.512988 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.515304 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "fc477587-ce40-4773-b6a3-3baefaff6b24" (UID: "fc477587-ce40-4773-b6a3-3baefaff6b24"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568072 5032 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568111 5032 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568128 5032 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568141 5032 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568155 5032 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568167 5032 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/fc477587-ce40-4773-b6a3-3baefaff6b24-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.568179 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz92t\" (UniqueName: \"kubernetes.io/projected/fc477587-ce40-4773-b6a3-3baefaff6b24-kube-api-access-vz92t\") on node \"crc\" DevicePath \"\"" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.777723 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" event={"ID":"fc477587-ce40-4773-b6a3-3baefaff6b24","Type":"ContainerDied","Data":"83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5"} Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.777782 5032 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="83a49718e9db235c39f1bccc57645f9701a1483b10fc1bf94d06053f27538bf5" Jan 22 16:51:40 crc kubenswrapper[5032]: I0122 16:51:40.777804 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4" Jan 22 16:53:03 crc kubenswrapper[5032]: I0122 16:53:03.017199 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:53:03 crc kubenswrapper[5032]: I0122 16:53:03.017943 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.581249 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 16:53:04 crc kubenswrapper[5032]: E0122 16:53:04.582193 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc477587-ce40-4773-b6a3-3baefaff6b24" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.582228 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc477587-ce40-4773-b6a3-3baefaff6b24" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.582632 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc477587-ce40-4773-b6a3-3baefaff6b24" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.585081 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.591274 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.678150 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.678452 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjrjg\" (UniqueName: \"kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.678604 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.780877 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.781020 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.781119 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjrjg\" (UniqueName: \"kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.781569 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.781569 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.807984 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vjrjg\" (UniqueName: \"kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg\") pod \"certified-operators-76bkz\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:04 crc kubenswrapper[5032]: I0122 16:53:04.915019 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:05 crc kubenswrapper[5032]: I0122 16:53:05.521933 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 16:53:05 crc kubenswrapper[5032]: I0122 16:53:05.876508 5032 generic.go:334] "Generic (PLEG): container finished" podID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerID="087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18" exitCode=0 Jan 22 16:53:05 crc kubenswrapper[5032]: I0122 16:53:05.876566 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerDied","Data":"087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18"} Jan 22 16:53:05 crc kubenswrapper[5032]: I0122 16:53:05.877040 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerStarted","Data":"aaa2c6053029e51629d79839ebdca6977ab339ca7988a9a6411439ed28359cb9"} Jan 22 16:53:05 crc kubenswrapper[5032]: I0122 16:53:05.879446 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:53:10 crc kubenswrapper[5032]: I0122 16:53:10.922712 5032 generic.go:334] "Generic (PLEG): container finished" podID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerID="208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58" exitCode=0 Jan 22 16:53:10 crc kubenswrapper[5032]: I0122 16:53:10.922799 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerDied","Data":"208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58"} Jan 22 16:53:11 crc kubenswrapper[5032]: I0122 16:53:11.939129 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerStarted","Data":"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47"} Jan 22 16:53:11 crc kubenswrapper[5032]: I0122 16:53:11.957550 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-76bkz" podStartSLOduration=2.367710429 podStartE2EDuration="7.957532041s" podCreationTimestamp="2026-01-22 16:53:04 +0000 UTC" firstStartedPulling="2026-01-22 16:53:05.879138633 +0000 UTC m=+2889.691307664" lastFinishedPulling="2026-01-22 16:53:11.468960205 +0000 UTC m=+2895.281129276" observedRunningTime="2026-01-22 16:53:11.955147648 +0000 UTC m=+2895.767316719" watchObservedRunningTime="2026-01-22 16:53:11.957532041 +0000 UTC m=+2895.769701082" Jan 22 16:53:14 crc kubenswrapper[5032]: I0122 16:53:14.915771 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:14 crc kubenswrapper[5032]: I0122 16:53:14.916359 5032 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:14 crc kubenswrapper[5032]: I0122 16:53:14.976375 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.048617 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.109020 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.159262 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.159505 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7brs8" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="registry-server" containerID="cri-o://a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b" gracePeriod=2 Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.644232 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.728598 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqx5q\" (UniqueName: \"kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q\") pod \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.728668 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities\") pod \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.729185 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities" (OuterVolumeSpecName: "utilities") pod "e3627b49-2dc6-42b5-a920-22d9aca7d4dd" (UID: "e3627b49-2dc6-42b5-a920-22d9aca7d4dd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.729263 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content\") pod \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\" (UID: \"e3627b49-2dc6-42b5-a920-22d9aca7d4dd\") " Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.735804 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.736069 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q" (OuterVolumeSpecName: "kube-api-access-qqx5q") pod "e3627b49-2dc6-42b5-a920-22d9aca7d4dd" (UID: "e3627b49-2dc6-42b5-a920-22d9aca7d4dd"). 
InnerVolumeSpecName "kube-api-access-qqx5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.768939 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3627b49-2dc6-42b5-a920-22d9aca7d4dd" (UID: "e3627b49-2dc6-42b5-a920-22d9aca7d4dd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.838508 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqx5q\" (UniqueName: \"kubernetes.io/projected/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-kube-api-access-qqx5q\") on node \"crc\" DevicePath \"\"" Jan 22 16:53:25 crc kubenswrapper[5032]: I0122 16:53:25.838698 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3627b49-2dc6-42b5-a920-22d9aca7d4dd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.069774 5032 generic.go:334] "Generic (PLEG): container finished" podID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerID="a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b" exitCode=0 Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.069820 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerDied","Data":"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b"} Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.069845 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7brs8" event={"ID":"e3627b49-2dc6-42b5-a920-22d9aca7d4dd","Type":"ContainerDied","Data":"9e30ded1e8c2b59229c81dc3e0c44e9023b5c30fb6653dc6ff6ae954e7dd97d2"} Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.069897 5032 scope.go:117] "RemoveContainer" containerID="a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.069966 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7brs8" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.088414 5032 scope.go:117] "RemoveContainer" containerID="0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.102969 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.110514 5032 scope.go:117] "RemoveContainer" containerID="f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.114543 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7brs8"] Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.149522 5032 scope.go:117] "RemoveContainer" containerID="a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b" Jan 22 16:53:26 crc kubenswrapper[5032]: E0122 16:53:26.149979 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b\": container with ID starting with a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b not found: ID does not exist" containerID="a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.150033 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b"} err="failed to get container status \"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b\": rpc error: code = NotFound desc = could not find container \"a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b\": container with ID starting with a087e6a6e1a989e70c8d75a6c15a3231514341cf15cc8f9d92b5ef349603645b not found: ID does not exist" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.150064 5032 scope.go:117] "RemoveContainer" containerID="0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568" Jan 22 16:53:26 crc kubenswrapper[5032]: E0122 16:53:26.150546 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568\": container with ID starting with 0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568 not found: ID does not exist" containerID="0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.150589 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568"} err="failed to get container status \"0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568\": rpc error: code = NotFound desc = could not find container \"0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568\": container with ID starting with 0dcae74be9ae97f1b8b17732fb536a16c6192791c36aa4be63ddec410f60c568 not found: ID does not exist" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.150620 5032 scope.go:117] "RemoveContainer" containerID="f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5" Jan 22 16:53:26 crc kubenswrapper[5032]: E0122 16:53:26.151074 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5\": container with ID starting with f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5 not found: ID does not exist" containerID="f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.151104 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5"} err="failed to get container status \"f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5\": rpc error: code = NotFound desc = could not find container \"f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5\": container with ID starting with f816486960469fbee054ec5fcd31831d6c6bcdf214ab8209a6f1ce1aa3c3bfd5 not found: ID does not exist" Jan 22 16:53:26 crc kubenswrapper[5032]: I0122 16:53:26.463965 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" path="/var/lib/kubelet/pods/e3627b49-2dc6-42b5-a920-22d9aca7d4dd/volumes" Jan 22 16:53:33 crc kubenswrapper[5032]: I0122 16:53:33.017331 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:53:33 crc kubenswrapper[5032]: I0122 16:53:33.018063 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.017215 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.017753 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.017799 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.018521 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.018570 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" gracePeriod=600 Jan 22 16:54:03 crc kubenswrapper[5032]: E0122 16:54:03.141581 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.583399 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" exitCode=0 Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.583457 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a"} Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.583494 5032 scope.go:117] "RemoveContainer" containerID="78de692c8630b29db19328b8df18d64297a6bb5911d69923e217598f00958d24" Jan 22 16:54:03 crc kubenswrapper[5032]: I0122 16:54:03.584232 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:54:03 crc kubenswrapper[5032]: E0122 16:54:03.584613 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:54:14 crc kubenswrapper[5032]: I0122 16:54:14.455442 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:54:14 crc kubenswrapper[5032]: E0122 16:54:14.456100 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:54:26 crc kubenswrapper[5032]: I0122 16:54:26.463599 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:54:26 crc kubenswrapper[5032]: E0122 16:54:26.464642 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:54:38 crc kubenswrapper[5032]: I0122 16:54:38.469057 5032 scope.go:117] 
"RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:54:38 crc kubenswrapper[5032]: E0122 16:54:38.470335 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:54:50 crc kubenswrapper[5032]: I0122 16:54:50.455736 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:54:50 crc kubenswrapper[5032]: E0122 16:54:50.456515 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:55:01 crc kubenswrapper[5032]: I0122 16:55:01.455313 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:55:01 crc kubenswrapper[5032]: E0122 16:55:01.456169 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.867031 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:12 crc kubenswrapper[5032]: E0122 16:55:12.869290 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="extract-utilities" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.869406 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="extract-utilities" Jan 22 16:55:12 crc kubenswrapper[5032]: E0122 16:55:12.869496 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="extract-content" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.869582 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="extract-content" Jan 22 16:55:12 crc kubenswrapper[5032]: E0122 16:55:12.869667 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="registry-server" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.869740 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="registry-server" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.870191 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3627b49-2dc6-42b5-a920-22d9aca7d4dd" containerName="registry-server" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 
16:55:12.871928 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.881628 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.930408 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.930449 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzqf5\" (UniqueName: \"kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:12 crc kubenswrapper[5032]: I0122 16:55:12.930611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.031199 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.031256 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.031271 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzqf5\" (UniqueName: \"kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.031791 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.031842 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.050788 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzqf5\" (UniqueName: \"kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5\") pod \"redhat-operators-wphtg\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.200213 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:13 crc kubenswrapper[5032]: I0122 16:55:13.687849 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:14 crc kubenswrapper[5032]: I0122 16:55:14.284726 5032 generic.go:334] "Generic (PLEG): container finished" podID="06150632-ed2c-4ce8-96c5-312038b96e44" containerID="9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2" exitCode=0 Jan 22 16:55:14 crc kubenswrapper[5032]: I0122 16:55:14.284801 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerDied","Data":"9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2"} Jan 22 16:55:14 crc kubenswrapper[5032]: I0122 16:55:14.285057 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerStarted","Data":"6b38334b2aad1df444a28548e7427cce121d10ddf80349d33629de4dc1d65dcc"} Jan 22 16:55:15 crc kubenswrapper[5032]: I0122 16:55:15.294888 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerStarted","Data":"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d"} Jan 22 16:55:16 crc kubenswrapper[5032]: I0122 16:55:16.468258 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:55:16 crc kubenswrapper[5032]: E0122 16:55:16.469371 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:55:19 crc kubenswrapper[5032]: I0122 16:55:19.337307 5032 generic.go:334] "Generic (PLEG): container finished" podID="06150632-ed2c-4ce8-96c5-312038b96e44" containerID="bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d" exitCode=0 Jan 22 16:55:19 crc kubenswrapper[5032]: I0122 16:55:19.337365 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerDied","Data":"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d"} Jan 22 16:55:20 crc kubenswrapper[5032]: I0122 16:55:20.348709 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerStarted","Data":"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47"} Jan 22 16:55:20 crc kubenswrapper[5032]: I0122 16:55:20.371118 5032 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wphtg" podStartSLOduration=2.89761284 podStartE2EDuration="8.371097073s" podCreationTimestamp="2026-01-22 16:55:12 +0000 UTC" firstStartedPulling="2026-01-22 16:55:14.287238144 +0000 UTC m=+3018.099407195" lastFinishedPulling="2026-01-22 16:55:19.760722397 +0000 UTC m=+3023.572891428" observedRunningTime="2026-01-22 16:55:20.366064039 +0000 UTC m=+3024.178233090" watchObservedRunningTime="2026-01-22 16:55:20.371097073 +0000 UTC m=+3024.183266134" Jan 22 16:55:23 crc kubenswrapper[5032]: I0122 16:55:23.200310 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:23 crc kubenswrapper[5032]: I0122 16:55:23.200793 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:24 crc kubenswrapper[5032]: I0122 16:55:24.252137 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wphtg" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="registry-server" probeResult="failure" output=< Jan 22 16:55:24 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 16:55:24 crc kubenswrapper[5032]: > Jan 22 16:55:30 crc kubenswrapper[5032]: I0122 16:55:30.458577 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:55:30 crc kubenswrapper[5032]: E0122 16:55:30.461367 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:55:33 crc kubenswrapper[5032]: I0122 16:55:33.281941 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:33 crc kubenswrapper[5032]: I0122 16:55:33.344555 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:33 crc kubenswrapper[5032]: I0122 16:55:33.526352 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:34 crc kubenswrapper[5032]: I0122 16:55:34.498743 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wphtg" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="registry-server" containerID="cri-o://d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47" gracePeriod=2 Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.061548 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.176121 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content\") pod \"06150632-ed2c-4ce8-96c5-312038b96e44\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.176232 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzqf5\" (UniqueName: \"kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5\") pod \"06150632-ed2c-4ce8-96c5-312038b96e44\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.176275 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities\") pod \"06150632-ed2c-4ce8-96c5-312038b96e44\" (UID: \"06150632-ed2c-4ce8-96c5-312038b96e44\") " Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.177296 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities" (OuterVolumeSpecName: "utilities") pod "06150632-ed2c-4ce8-96c5-312038b96e44" (UID: "06150632-ed2c-4ce8-96c5-312038b96e44"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.182376 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5" (OuterVolumeSpecName: "kube-api-access-gzqf5") pod "06150632-ed2c-4ce8-96c5-312038b96e44" (UID: "06150632-ed2c-4ce8-96c5-312038b96e44"). InnerVolumeSpecName "kube-api-access-gzqf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.278467 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzqf5\" (UniqueName: \"kubernetes.io/projected/06150632-ed2c-4ce8-96c5-312038b96e44-kube-api-access-gzqf5\") on node \"crc\" DevicePath \"\"" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.278500 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.285881 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06150632-ed2c-4ce8-96c5-312038b96e44" (UID: "06150632-ed2c-4ce8-96c5-312038b96e44"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.380426 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06150632-ed2c-4ce8-96c5-312038b96e44-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.512161 5032 generic.go:334] "Generic (PLEG): container finished" podID="06150632-ed2c-4ce8-96c5-312038b96e44" containerID="d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47" exitCode=0 Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.512225 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wphtg" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.512224 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerDied","Data":"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47"} Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.512332 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wphtg" event={"ID":"06150632-ed2c-4ce8-96c5-312038b96e44","Type":"ContainerDied","Data":"6b38334b2aad1df444a28548e7427cce121d10ddf80349d33629de4dc1d65dcc"} Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.512378 5032 scope.go:117] "RemoveContainer" containerID="d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.544810 5032 scope.go:117] "RemoveContainer" containerID="bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.550640 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.558328 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wphtg"] Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.570869 5032 scope.go:117] "RemoveContainer" containerID="9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.634647 5032 scope.go:117] "RemoveContainer" containerID="d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47" Jan 22 16:55:35 crc kubenswrapper[5032]: E0122 16:55:35.635225 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47\": container with ID starting with d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47 not found: ID does not exist" containerID="d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.635278 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47"} err="failed to get container status \"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47\": rpc error: code = NotFound desc = could not find container \"d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47\": container with ID starting with d12ffa8c1dc97c1f90b954a5a368c635759701bfbb96f8a6ce6fe739717aeb47 not found: ID does not exist" Jan 22 16:55:35 crc 
kubenswrapper[5032]: I0122 16:55:35.635318 5032 scope.go:117] "RemoveContainer" containerID="bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d" Jan 22 16:55:35 crc kubenswrapper[5032]: E0122 16:55:35.635674 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d\": container with ID starting with bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d not found: ID does not exist" containerID="bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.635716 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d"} err="failed to get container status \"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d\": rpc error: code = NotFound desc = could not find container \"bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d\": container with ID starting with bc96a8bb07fcf510c18727ba98913d0537f8996171bcadff760d5379c060947d not found: ID does not exist" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.635746 5032 scope.go:117] "RemoveContainer" containerID="9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2" Jan 22 16:55:35 crc kubenswrapper[5032]: E0122 16:55:35.636035 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2\": container with ID starting with 9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2 not found: ID does not exist" containerID="9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2" Jan 22 16:55:35 crc kubenswrapper[5032]: I0122 16:55:35.636068 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2"} err="failed to get container status \"9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2\": rpc error: code = NotFound desc = could not find container \"9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2\": container with ID starting with 9727976be3e35106c6388b26a9845ab748c8f6eed8ccdf3bdfc38b36a90da3b2 not found: ID does not exist" Jan 22 16:55:36 crc kubenswrapper[5032]: I0122 16:55:36.473613 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" path="/var/lib/kubelet/pods/06150632-ed2c-4ce8-96c5-312038b96e44/volumes" Jan 22 16:55:42 crc kubenswrapper[5032]: I0122 16:55:42.455069 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:55:42 crc kubenswrapper[5032]: E0122 16:55:42.455833 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:55:56 crc kubenswrapper[5032]: I0122 16:55:56.454170 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" 
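The 16:55:20 pod_startup_latency_tracker record above reports podStartSLOduration=2.89761284s against podStartE2EDuration=8.371097073s for redhat-operators-wphtg. The gap matches the image-pull window (lastFinishedPulling minus firstStartedPulling on the monotonic m=+ clock), which suggests the SLO figure is the end-to-end startup time with image pulling excluded; that interpretation is inferred from the numbers in the record, not quoted from kubelet source. A minimal sketch checking the arithmetic with the values from that record:

# Rough check of the pod_startup_latency_tracker numbers in the
# 16:55:20 record for redhat-operators-wphtg. Values are copied from
# the record; the "E2E minus pull window" relationship is an inference.
pod_start_e2e = 8.371097073            # podStartE2EDuration (seconds)

# Image-pull window, taken from the monotonic (m=+...) readings.
first_started_pulling = 3018.099407195  # firstStartedPulling m=+...
last_finished_pulling = 3023.572891428  # lastFinishedPulling m=+...
pull_window = last_finished_pulling - first_started_pulling  # ~5.473s

# Startup latency with the image-pull window excluded.
slo_duration = pod_start_e2e - pull_window

print(f"pull window  : {pull_window:.9f}s")
print(f"computed SLO : {slo_duration:.9f}s")   # 2.897612840
print("reported SLO : 2.89761284s (podStartSLOduration)")

The later 16:58:18 record for community-operators-jtfkp checks out the same way: 5.189979727 - (3201.405069837 - 3198.918871878) = 2.703781768, matching its podStartSLOduration.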
Jan 22 16:55:56 crc kubenswrapper[5032]: E0122 16:55:56.454946 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:56:10 crc kubenswrapper[5032]: I0122 16:56:10.454576 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:56:10 crc kubenswrapper[5032]: E0122 16:56:10.455369 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:56:22 crc kubenswrapper[5032]: I0122 16:56:22.454972 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:56:22 crc kubenswrapper[5032]: E0122 16:56:22.455719 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:56:34 crc kubenswrapper[5032]: I0122 16:56:34.454980 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:56:34 crc kubenswrapper[5032]: E0122 16:56:34.455745 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:56:47 crc kubenswrapper[5032]: I0122 16:56:47.455416 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:56:47 crc kubenswrapper[5032]: E0122 16:56:47.456216 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:57:01 crc kubenswrapper[5032]: I0122 16:57:01.454484 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:57:01 crc kubenswrapper[5032]: E0122 16:57:01.455234 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:57:12 crc kubenswrapper[5032]: I0122 16:57:12.454942 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:57:12 crc kubenswrapper[5032]: E0122 16:57:12.455836 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:57:25 crc kubenswrapper[5032]: I0122 16:57:25.455538 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:57:25 crc kubenswrapper[5032]: E0122 16:57:25.456591 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:57:38 crc kubenswrapper[5032]: I0122 16:57:38.454768 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:57:38 crc kubenswrapper[5032]: E0122 16:57:38.455688 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:57:50 crc kubenswrapper[5032]: I0122 16:57:50.455914 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:57:50 crc kubenswrapper[5032]: E0122 16:57:50.457306 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:58:04 crc kubenswrapper[5032]: I0122 16:58:04.454712 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:58:04 crc kubenswrapper[5032]: E0122 16:58:04.455540 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.729977 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:13 crc kubenswrapper[5032]: E0122 16:58:13.731320 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="registry-server" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.731480 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="registry-server" Jan 22 16:58:13 crc kubenswrapper[5032]: E0122 16:58:13.731547 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="extract-content" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.731559 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="extract-content" Jan 22 16:58:13 crc kubenswrapper[5032]: E0122 16:58:13.731585 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="extract-utilities" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.731598 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="extract-utilities" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.731975 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="06150632-ed2c-4ce8-96c5-312038b96e44" containerName="registry-server" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.794558 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.814259 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.895623 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.895684 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmbnf\" (UniqueName: \"kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.895823 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.997544 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.997688 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.997714 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmbnf\" (UniqueName: \"kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.998649 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:13 crc kubenswrapper[5032]: I0122 16:58:13.998943 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:14 crc kubenswrapper[5032]: I0122 16:58:14.018628 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lmbnf\" (UniqueName: \"kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf\") pod \"community-operators-jtfkp\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:14 crc kubenswrapper[5032]: I0122 16:58:14.130783 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:14 crc kubenswrapper[5032]: I0122 16:58:14.645176 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.104631 5032 generic.go:334] "Generic (PLEG): container finished" podID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerID="91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88" exitCode=0 Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.104680 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerDied","Data":"91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88"} Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.104708 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerStarted","Data":"9b461db0de0a9a52c563f7ff9c101ef5e0059d44b596a4cb7d3526c4a5a5a490"} Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.106967 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.315692 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.317730 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.330588 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.423772 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjs68\" (UniqueName: \"kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.424061 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.424090 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.455247 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:58:15 crc kubenswrapper[5032]: E0122 16:58:15.455504 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.525362 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.525584 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjs68\" (UniqueName: \"kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.525636 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.526277 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.526339 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.548290 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjs68\" (UniqueName: \"kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68\") pod \"redhat-marketplace-pp9f2\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:15 crc kubenswrapper[5032]: I0122 16:58:15.690266 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:16 crc kubenswrapper[5032]: I0122 16:58:16.113490 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerStarted","Data":"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12"} Jan 22 16:58:16 crc kubenswrapper[5032]: I0122 16:58:16.260714 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:16 crc kubenswrapper[5032]: W0122 16:58:16.265399 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1c4cbdb_4168_4c22_bf6d_de086b97612d.slice/crio-7212066ad1b758653e3d355729f4a2a1919421b314b495621eea7c9d783015e8 WatchSource:0}: Error finding container 7212066ad1b758653e3d355729f4a2a1919421b314b495621eea7c9d783015e8: Status 404 returned error can't find the container with id 7212066ad1b758653e3d355729f4a2a1919421b314b495621eea7c9d783015e8 Jan 22 16:58:17 crc kubenswrapper[5032]: I0122 16:58:17.123304 5032 generic.go:334] "Generic (PLEG): container finished" podID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerID="4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504" exitCode=0 Jan 22 16:58:17 crc kubenswrapper[5032]: I0122 16:58:17.123695 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerDied","Data":"4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504"} Jan 22 16:58:17 crc kubenswrapper[5032]: I0122 16:58:17.123726 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerStarted","Data":"7212066ad1b758653e3d355729f4a2a1919421b314b495621eea7c9d783015e8"} Jan 22 16:58:17 crc kubenswrapper[5032]: I0122 16:58:17.130623 5032 generic.go:334] "Generic (PLEG): container finished" podID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerID="71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12" exitCode=0 Jan 22 16:58:17 crc kubenswrapper[5032]: I0122 16:58:17.130698 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" 
event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerDied","Data":"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12"} Jan 22 16:58:18 crc kubenswrapper[5032]: I0122 16:58:18.140120 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerStarted","Data":"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23"} Jan 22 16:58:18 crc kubenswrapper[5032]: I0122 16:58:18.144077 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerStarted","Data":"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc"} Jan 22 16:58:18 crc kubenswrapper[5032]: I0122 16:58:18.190004 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jtfkp" podStartSLOduration=2.703781768 podStartE2EDuration="5.189979727s" podCreationTimestamp="2026-01-22 16:58:13 +0000 UTC" firstStartedPulling="2026-01-22 16:58:15.106702847 +0000 UTC m=+3198.918871878" lastFinishedPulling="2026-01-22 16:58:17.592900806 +0000 UTC m=+3201.405069837" observedRunningTime="2026-01-22 16:58:18.178789198 +0000 UTC m=+3201.990958229" watchObservedRunningTime="2026-01-22 16:58:18.189979727 +0000 UTC m=+3202.002148758" Jan 22 16:58:19 crc kubenswrapper[5032]: I0122 16:58:19.156136 5032 generic.go:334] "Generic (PLEG): container finished" podID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerID="9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23" exitCode=0 Jan 22 16:58:19 crc kubenswrapper[5032]: I0122 16:58:19.156228 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerDied","Data":"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23"} Jan 22 16:58:20 crc kubenswrapper[5032]: I0122 16:58:20.167057 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerStarted","Data":"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01"} Jan 22 16:58:20 crc kubenswrapper[5032]: I0122 16:58:20.192516 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pp9f2" podStartSLOduration=2.705848806 podStartE2EDuration="5.192494486s" podCreationTimestamp="2026-01-22 16:58:15 +0000 UTC" firstStartedPulling="2026-01-22 16:58:17.127024723 +0000 UTC m=+3200.939193754" lastFinishedPulling="2026-01-22 16:58:19.613670363 +0000 UTC m=+3203.425839434" observedRunningTime="2026-01-22 16:58:20.184875873 +0000 UTC m=+3203.997044924" watchObservedRunningTime="2026-01-22 16:58:20.192494486 +0000 UTC m=+3204.004663517" Jan 22 16:58:24 crc kubenswrapper[5032]: I0122 16:58:24.130982 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:24 crc kubenswrapper[5032]: I0122 16:58:24.131606 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:24 crc kubenswrapper[5032]: I0122 16:58:24.196329 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:24 crc 
kubenswrapper[5032]: I0122 16:58:24.275796 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:24 crc kubenswrapper[5032]: I0122 16:58:24.913544 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:25 crc kubenswrapper[5032]: I0122 16:58:25.690419 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:25 crc kubenswrapper[5032]: I0122 16:58:25.690810 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:25 crc kubenswrapper[5032]: I0122 16:58:25.766637 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.249691 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jtfkp" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="registry-server" containerID="cri-o://01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc" gracePeriod=2 Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.304230 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.810492 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.893579 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities\") pod \"28d2c724-8ab6-4d61-8843-94c7954d0a69\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.893988 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content\") pod \"28d2c724-8ab6-4d61-8843-94c7954d0a69\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.894086 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmbnf\" (UniqueName: \"kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf\") pod \"28d2c724-8ab6-4d61-8843-94c7954d0a69\" (UID: \"28d2c724-8ab6-4d61-8843-94c7954d0a69\") " Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.894685 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities" (OuterVolumeSpecName: "utilities") pod "28d2c724-8ab6-4d61-8843-94c7954d0a69" (UID: "28d2c724-8ab6-4d61-8843-94c7954d0a69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.901303 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf" (OuterVolumeSpecName: "kube-api-access-lmbnf") pod "28d2c724-8ab6-4d61-8843-94c7954d0a69" (UID: "28d2c724-8ab6-4d61-8843-94c7954d0a69"). 
InnerVolumeSpecName "kube-api-access-lmbnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.945394 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28d2c724-8ab6-4d61-8843-94c7954d0a69" (UID: "28d2c724-8ab6-4d61-8843-94c7954d0a69"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.996163 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.996584 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmbnf\" (UniqueName: \"kubernetes.io/projected/28d2c724-8ab6-4d61-8843-94c7954d0a69-kube-api-access-lmbnf\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:26 crc kubenswrapper[5032]: I0122 16:58:26.996601 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28d2c724-8ab6-4d61-8843-94c7954d0a69-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.266083 5032 generic.go:334] "Generic (PLEG): container finished" podID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerID="01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc" exitCode=0 Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.266168 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerDied","Data":"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc"} Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.266485 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jtfkp" event={"ID":"28d2c724-8ab6-4d61-8843-94c7954d0a69","Type":"ContainerDied","Data":"9b461db0de0a9a52c563f7ff9c101ef5e0059d44b596a4cb7d3526c4a5a5a490"} Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.266529 5032 scope.go:117] "RemoveContainer" containerID="01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.266191 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jtfkp" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.293767 5032 scope.go:117] "RemoveContainer" containerID="71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.319538 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.327298 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jtfkp"] Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.343983 5032 scope.go:117] "RemoveContainer" containerID="91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.381031 5032 scope.go:117] "RemoveContainer" containerID="01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc" Jan 22 16:58:27 crc kubenswrapper[5032]: E0122 16:58:27.381486 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc\": container with ID starting with 01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc not found: ID does not exist" containerID="01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.381552 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc"} err="failed to get container status \"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc\": rpc error: code = NotFound desc = could not find container \"01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc\": container with ID starting with 01b084f406078ffbaa53a0479eab96b2db93b68b7782d9a4cecfe28d5d4405bc not found: ID does not exist" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.381593 5032 scope.go:117] "RemoveContainer" containerID="71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12" Jan 22 16:58:27 crc kubenswrapper[5032]: E0122 16:58:27.381994 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12\": container with ID starting with 71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12 not found: ID does not exist" containerID="71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.382038 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12"} err="failed to get container status \"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12\": rpc error: code = NotFound desc = could not find container \"71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12\": container with ID starting with 71ee6c8da0b51114b4c7bcf8734adcd80f429bc39460ee6130df89d130b6fd12 not found: ID does not exist" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.382064 5032 scope.go:117] "RemoveContainer" containerID="91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88" Jan 22 16:58:27 crc kubenswrapper[5032]: E0122 16:58:27.382393 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88\": container with ID starting with 91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88 not found: ID does not exist" containerID="91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.382467 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88"} err="failed to get container status \"91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88\": rpc error: code = NotFound desc = could not find container \"91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88\": container with ID starting with 91f6f557f9c584dcf41544390a5f7227c06d2019cf91c382017d91df3fbccf88 not found: ID does not exist" Jan 22 16:58:27 crc kubenswrapper[5032]: I0122 16:58:27.711330 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:28 crc kubenswrapper[5032]: I0122 16:58:28.282116 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pp9f2" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="registry-server" containerID="cri-o://e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01" gracePeriod=2 Jan 22 16:58:28 crc kubenswrapper[5032]: I0122 16:58:28.455063 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:58:28 crc kubenswrapper[5032]: E0122 16:58:28.455361 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:58:28 crc kubenswrapper[5032]: I0122 16:58:28.473229 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" path="/var/lib/kubelet/pods/28d2c724-8ab6-4d61-8843-94c7954d0a69/volumes" Jan 22 16:58:28 crc kubenswrapper[5032]: I0122 16:58:28.937157 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.053532 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjs68\" (UniqueName: \"kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68\") pod \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.053646 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content\") pod \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.053954 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities\") pod \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\" (UID: \"f1c4cbdb-4168-4c22-bf6d-de086b97612d\") " Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.055284 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities" (OuterVolumeSpecName: "utilities") pod "f1c4cbdb-4168-4c22-bf6d-de086b97612d" (UID: "f1c4cbdb-4168-4c22-bf6d-de086b97612d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.056058 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.060851 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68" (OuterVolumeSpecName: "kube-api-access-tjs68") pod "f1c4cbdb-4168-4c22-bf6d-de086b97612d" (UID: "f1c4cbdb-4168-4c22-bf6d-de086b97612d"). InnerVolumeSpecName "kube-api-access-tjs68". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.075556 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1c4cbdb-4168-4c22-bf6d-de086b97612d" (UID: "f1c4cbdb-4168-4c22-bf6d-de086b97612d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.157387 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjs68\" (UniqueName: \"kubernetes.io/projected/f1c4cbdb-4168-4c22-bf6d-de086b97612d-kube-api-access-tjs68\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.157706 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1c4cbdb-4168-4c22-bf6d-de086b97612d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.292587 5032 generic.go:334] "Generic (PLEG): container finished" podID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerID="e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01" exitCode=0 Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.292707 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pp9f2" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.292738 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerDied","Data":"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01"} Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.294082 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pp9f2" event={"ID":"f1c4cbdb-4168-4c22-bf6d-de086b97612d","Type":"ContainerDied","Data":"7212066ad1b758653e3d355729f4a2a1919421b314b495621eea7c9d783015e8"} Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.294115 5032 scope.go:117] "RemoveContainer" containerID="e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.330677 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.335475 5032 scope.go:117] "RemoveContainer" containerID="9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.343447 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pp9f2"] Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.361574 5032 scope.go:117] "RemoveContainer" containerID="4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.401435 5032 scope.go:117] "RemoveContainer" containerID="e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01" Jan 22 16:58:29 crc kubenswrapper[5032]: E0122 16:58:29.402153 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01\": container with ID starting with e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01 not found: ID does not exist" containerID="e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.402269 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01"} err="failed to get container status 
\"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01\": rpc error: code = NotFound desc = could not find container \"e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01\": container with ID starting with e3faabad898688a40aa7ee5aaebbcb841886363f4ee77af873ebdced6aa75a01 not found: ID does not exist" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.402312 5032 scope.go:117] "RemoveContainer" containerID="9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23" Jan 22 16:58:29 crc kubenswrapper[5032]: E0122 16:58:29.402890 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23\": container with ID starting with 9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23 not found: ID does not exist" containerID="9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.402956 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23"} err="failed to get container status \"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23\": rpc error: code = NotFound desc = could not find container \"9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23\": container with ID starting with 9b1a9cdf3f9e84307debd601f946f2be715e821895abd2a673fd8bb13efece23 not found: ID does not exist" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.403002 5032 scope.go:117] "RemoveContainer" containerID="4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504" Jan 22 16:58:29 crc kubenswrapper[5032]: E0122 16:58:29.403558 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504\": container with ID starting with 4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504 not found: ID does not exist" containerID="4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504" Jan 22 16:58:29 crc kubenswrapper[5032]: I0122 16:58:29.403767 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504"} err="failed to get container status \"4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504\": rpc error: code = NotFound desc = could not find container \"4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504\": container with ID starting with 4099b6090cb3d679f4ae236e4df6e93415ced01c44716120e6df134a06bf6504 not found: ID does not exist" Jan 22 16:58:30 crc kubenswrapper[5032]: I0122 16:58:30.463782 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" path="/var/lib/kubelet/pods/f1c4cbdb-4168-4c22-bf6d-de086b97612d/volumes" Jan 22 16:58:42 crc kubenswrapper[5032]: I0122 16:58:42.459236 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:58:42 crc kubenswrapper[5032]: E0122 16:58:42.459837 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:58:54 crc kubenswrapper[5032]: I0122 16:58:54.455198 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:58:54 crc kubenswrapper[5032]: E0122 16:58:54.456316 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 16:59:05 crc kubenswrapper[5032]: I0122 16:59:05.454953 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 16:59:05 crc kubenswrapper[5032]: I0122 16:59:05.686022 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2"} Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.160229 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t"] Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161260 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="extract-utilities" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161277 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="extract-utilities" Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161296 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="extract-utilities" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161303 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="extract-utilities" Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161326 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="extract-content" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161335 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="extract-content" Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161355 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161361 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161370 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="extract-content" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161377 5032 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="extract-content" Jan 22 17:00:00 crc kubenswrapper[5032]: E0122 17:00:00.161390 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161397 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161608 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="28d2c724-8ab6-4d61-8843-94c7954d0a69" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.161628 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1c4cbdb-4168-4c22-bf6d-de086b97612d" containerName="registry-server" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.162396 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.165348 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.165417 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.201497 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t"] Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.282529 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.282626 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb7v9\" (UniqueName: \"kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.282659 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.384192 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.384514 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mb7v9\" (UniqueName: \"kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.384549 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.385268 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.391082 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.400367 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb7v9\" (UniqueName: \"kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9\") pod \"collect-profiles-29485020-wwr9t\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:00 crc kubenswrapper[5032]: I0122 17:00:00.575542 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:01 crc kubenswrapper[5032]: I0122 17:00:01.061591 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t"] Jan 22 17:00:01 crc kubenswrapper[5032]: I0122 17:00:01.305274 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" event={"ID":"cf468fca-eadf-4bae-8d37-1d287a5f2a25","Type":"ContainerStarted","Data":"e91408ecbf588fe43b0683b70ca6b7f5a125faa7e9387cb7f5fd19bf5baf8595"} Jan 22 17:00:01 crc kubenswrapper[5032]: I0122 17:00:01.305774 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" event={"ID":"cf468fca-eadf-4bae-8d37-1d287a5f2a25","Type":"ContainerStarted","Data":"9258d32ed7ab78073ffbbf6dfe19e927ec04c3011feadfee9c2c20ef739bd470"} Jan 22 17:00:01 crc kubenswrapper[5032]: I0122 17:00:01.330885 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" podStartSLOduration=1.330833766 podStartE2EDuration="1.330833766s" podCreationTimestamp="2026-01-22 17:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 17:00:01.323631624 +0000 UTC m=+3305.135800655" watchObservedRunningTime="2026-01-22 17:00:01.330833766 +0000 UTC m=+3305.143002797" Jan 22 17:00:02 crc kubenswrapper[5032]: I0122 17:00:02.314995 5032 generic.go:334] "Generic (PLEG): container finished" podID="cf468fca-eadf-4bae-8d37-1d287a5f2a25" containerID="e91408ecbf588fe43b0683b70ca6b7f5a125faa7e9387cb7f5fd19bf5baf8595" exitCode=0 Jan 22 17:00:02 crc kubenswrapper[5032]: I0122 17:00:02.315092 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" event={"ID":"cf468fca-eadf-4bae-8d37-1d287a5f2a25","Type":"ContainerDied","Data":"e91408ecbf588fe43b0683b70ca6b7f5a125faa7e9387cb7f5fd19bf5baf8595"} Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.758496 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.851910 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb7v9\" (UniqueName: \"kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9\") pod \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.852165 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume\") pod \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.852225 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume\") pod \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\" (UID: \"cf468fca-eadf-4bae-8d37-1d287a5f2a25\") " Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.854311 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume" (OuterVolumeSpecName: "config-volume") pod "cf468fca-eadf-4bae-8d37-1d287a5f2a25" (UID: "cf468fca-eadf-4bae-8d37-1d287a5f2a25"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.859391 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cf468fca-eadf-4bae-8d37-1d287a5f2a25" (UID: "cf468fca-eadf-4bae-8d37-1d287a5f2a25"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.859508 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9" (OuterVolumeSpecName: "kube-api-access-mb7v9") pod "cf468fca-eadf-4bae-8d37-1d287a5f2a25" (UID: "cf468fca-eadf-4bae-8d37-1d287a5f2a25"). InnerVolumeSpecName "kube-api-access-mb7v9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.954282 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb7v9\" (UniqueName: \"kubernetes.io/projected/cf468fca-eadf-4bae-8d37-1d287a5f2a25-kube-api-access-mb7v9\") on node \"crc\" DevicePath \"\"" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.954585 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cf468fca-eadf-4bae-8d37-1d287a5f2a25-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:00:03 crc kubenswrapper[5032]: I0122 17:00:03.954595 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cf468fca-eadf-4bae-8d37-1d287a5f2a25-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.336296 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" event={"ID":"cf468fca-eadf-4bae-8d37-1d287a5f2a25","Type":"ContainerDied","Data":"9258d32ed7ab78073ffbbf6dfe19e927ec04c3011feadfee9c2c20ef739bd470"} Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.336352 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9258d32ed7ab78073ffbbf6dfe19e927ec04c3011feadfee9c2c20ef739bd470" Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.336421 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t" Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.421097 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v"] Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.452803 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484975-94q7v"] Jan 22 17:00:04 crc kubenswrapper[5032]: I0122 17:00:04.466763 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ab0b48d-dd7e-42f2-94a1-1be5a35b967f" path="/var/lib/kubelet/pods/8ab0b48d-dd7e-42f2-94a1-1be5a35b967f/volumes" Jan 22 17:00:20 crc kubenswrapper[5032]: I0122 17:00:20.231516 5032 scope.go:117] "RemoveContainer" containerID="88ef6b06f7af604c85265e919a71b300549c9a92c6522d45f42e9d4c747c8b77" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.158228 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29485021-rk9mk"] Jan 22 17:01:00 crc kubenswrapper[5032]: E0122 17:01:00.159285 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf468fca-eadf-4bae-8d37-1d287a5f2a25" containerName="collect-profiles" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.159303 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf468fca-eadf-4bae-8d37-1d287a5f2a25" containerName="collect-profiles" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.159538 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf468fca-eadf-4bae-8d37-1d287a5f2a25" containerName="collect-profiles" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.160566 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.171249 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29485021-rk9mk"] Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.330109 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.330287 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.330668 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2htt\" (UniqueName: \"kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.330746 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.433232 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2htt\" (UniqueName: \"kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.433373 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.433500 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.433573 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.442210 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.442370 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.447301 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.459475 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2htt\" (UniqueName: \"kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt\") pod \"keystone-cron-29485021-rk9mk\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:00 crc kubenswrapper[5032]: I0122 17:01:00.492821 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:01 crc kubenswrapper[5032]: I0122 17:01:01.028198 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29485021-rk9mk"] Jan 22 17:01:01 crc kubenswrapper[5032]: I0122 17:01:01.998214 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485021-rk9mk" event={"ID":"0330603b-2b77-4011-a1e7-ff93c8c9c36b","Type":"ContainerStarted","Data":"affce490726c49dde02d8a8b779949c65733544d14412d92e36b52cce9868f9a"} Jan 22 17:01:01 crc kubenswrapper[5032]: I0122 17:01:01.998691 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485021-rk9mk" event={"ID":"0330603b-2b77-4011-a1e7-ff93c8c9c36b","Type":"ContainerStarted","Data":"e2467bed32e451f522dbf6004900e62621ce9e325b33d58eaafdadde73c658e1"} Jan 22 17:01:02 crc kubenswrapper[5032]: I0122 17:01:02.027770 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29485021-rk9mk" podStartSLOduration=2.027748948 podStartE2EDuration="2.027748948s" podCreationTimestamp="2026-01-22 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 17:01:02.016759255 +0000 UTC m=+3365.828928286" watchObservedRunningTime="2026-01-22 17:01:02.027748948 +0000 UTC m=+3365.839917989" Jan 22 17:01:04 crc kubenswrapper[5032]: I0122 17:01:04.022408 5032 generic.go:334] "Generic (PLEG): container finished" podID="0330603b-2b77-4011-a1e7-ff93c8c9c36b" containerID="affce490726c49dde02d8a8b779949c65733544d14412d92e36b52cce9868f9a" exitCode=0 Jan 22 17:01:04 crc kubenswrapper[5032]: I0122 17:01:04.022530 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485021-rk9mk" event={"ID":"0330603b-2b77-4011-a1e7-ff93c8c9c36b","Type":"ContainerDied","Data":"affce490726c49dde02d8a8b779949c65733544d14412d92e36b52cce9868f9a"} Jan 22 17:01:05 crc kubenswrapper[5032]: 
I0122 17:01:05.480445 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.640041 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data\") pod \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.640355 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle\") pod \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.640419 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys\") pod \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.640631 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2htt\" (UniqueName: \"kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt\") pod \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\" (UID: \"0330603b-2b77-4011-a1e7-ff93c8c9c36b\") " Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.648511 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt" (OuterVolumeSpecName: "kube-api-access-w2htt") pod "0330603b-2b77-4011-a1e7-ff93c8c9c36b" (UID: "0330603b-2b77-4011-a1e7-ff93c8c9c36b"). InnerVolumeSpecName "kube-api-access-w2htt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.650653 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0330603b-2b77-4011-a1e7-ff93c8c9c36b" (UID: "0330603b-2b77-4011-a1e7-ff93c8c9c36b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.666547 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0330603b-2b77-4011-a1e7-ff93c8c9c36b" (UID: "0330603b-2b77-4011-a1e7-ff93c8c9c36b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.719839 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data" (OuterVolumeSpecName: "config-data") pod "0330603b-2b77-4011-a1e7-ff93c8c9c36b" (UID: "0330603b-2b77-4011-a1e7-ff93c8c9c36b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.742980 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.743013 5032 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.743022 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2htt\" (UniqueName: \"kubernetes.io/projected/0330603b-2b77-4011-a1e7-ff93c8c9c36b-kube-api-access-w2htt\") on node \"crc\" DevicePath \"\"" Jan 22 17:01:05 crc kubenswrapper[5032]: I0122 17:01:05.743031 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0330603b-2b77-4011-a1e7-ff93c8c9c36b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 17:01:06 crc kubenswrapper[5032]: I0122 17:01:06.042423 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485021-rk9mk" event={"ID":"0330603b-2b77-4011-a1e7-ff93c8c9c36b","Type":"ContainerDied","Data":"e2467bed32e451f522dbf6004900e62621ce9e325b33d58eaafdadde73c658e1"} Jan 22 17:01:06 crc kubenswrapper[5032]: I0122 17:01:06.042487 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2467bed32e451f522dbf6004900e62621ce9e325b33d58eaafdadde73c658e1" Jan 22 17:01:06 crc kubenswrapper[5032]: I0122 17:01:06.042519 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29485021-rk9mk" Jan 22 17:01:33 crc kubenswrapper[5032]: I0122 17:01:33.016601 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:01:33 crc kubenswrapper[5032]: I0122 17:01:33.017196 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:02:03 crc kubenswrapper[5032]: I0122 17:02:03.016695 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:02:03 crc kubenswrapper[5032]: I0122 17:02:03.017315 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.017488 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.018053 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.018142 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.018986 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.019046 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2" gracePeriod=600 Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.932200 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2" exitCode=0 Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.932727 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2"} Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.932878 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629"} Jan 22 17:02:33 crc kubenswrapper[5032]: I0122 17:02:33.932902 5032 scope.go:117] "RemoveContainer" containerID="5a1f87b0245634ba5fcad24550df080883297da6488fd878345fe2406db7d54a" Jan 22 17:04:33 crc kubenswrapper[5032]: I0122 17:04:33.017211 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:04:33 crc kubenswrapper[5032]: I0122 17:04:33.017827 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:05:03 crc kubenswrapper[5032]: I0122 17:05:03.017108 5032 patch_prober.go:28] interesting 
pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:05:03 crc kubenswrapper[5032]: I0122 17:05:03.017688 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:05:05 crc kubenswrapper[5032]: I0122 17:05:05.731204 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 22 17:05:09 crc kubenswrapper[5032]: I0122 17:05:09.449144 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.168:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 17:05:10 crc kubenswrapper[5032]: I0122 17:05:10.733933 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 22 17:05:14 crc kubenswrapper[5032]: I0122 17:05:14.492158 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.168:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 17:05:15 crc kubenswrapper[5032]: I0122 17:05:15.733547 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 22 17:05:15 crc kubenswrapper[5032]: I0122 17:05:15.733722 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Jan 22 17:05:15 crc kubenswrapper[5032]: I0122 17:05:15.734960 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"867e3e7ca9ef86730c2cf37ce0362ea03d5fb5cad1cd93017b481be88b9b3fc7"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted" Jan 22 17:05:15 crc kubenswrapper[5032]: I0122 17:05:15.735139 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-central-agent" containerID="cri-o://867e3e7ca9ef86730c2cf37ce0362ea03d5fb5cad1cd93017b481be88b9b3fc7" gracePeriod=30 Jan 22 17:05:19 crc kubenswrapper[5032]: I0122 17:05:19.534329 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.168:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 
17:05:19 crc kubenswrapper[5032]: I0122 17:05:19.535165 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 17:05:19 crc kubenswrapper[5032]: I0122 17:05:19.536389 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"392346e95511f9e773c8a4027153ea1358a1abe20ee1967ad00faf07032136e6"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted" Jan 22 17:05:19 crc kubenswrapper[5032]: I0122 17:05:19.536484 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerName="cinder-scheduler" containerID="cri-o://392346e95511f9e773c8a4027153ea1358a1abe20ee1967ad00faf07032136e6" gracePeriod=30 Jan 22 17:05:25 crc kubenswrapper[5032]: I0122 17:05:25.732358 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 22 17:05:33 crc kubenswrapper[5032]: I0122 17:05:33.017132 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:05:33 crc kubenswrapper[5032]: I0122 17:05:33.017629 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:05:33 crc kubenswrapper[5032]: I0122 17:05:33.017678 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:05:33 crc kubenswrapper[5032]: I0122 17:05:33.018477 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:05:33 crc kubenswrapper[5032]: I0122 17:05:33.018552 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" gracePeriod=600 Jan 22 17:05:41 crc kubenswrapper[5032]: I0122 17:05:41.621309 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-config-operator_machine-config-daemon-6qsfs_fee2227c-a587-4ec3-909d-bd355aa116d4/machine-config-daemon/13.log" Jan 22 17:05:41 crc kubenswrapper[5032]: I0122 17:05:41.623157 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" exitCode=-1 Jan 22 17:05:41 crc kubenswrapper[5032]: I0122 
17:05:41.623221 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629"} Jan 22 17:05:41 crc kubenswrapper[5032]: I0122 17:05:41.623276 5032 scope.go:117] "RemoveContainer" containerID="0274cd20289aab10b54de0cf96b7e843ff319268a1d817fcd26270ae9cbb04a2" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.411378 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:05:46 crc kubenswrapper[5032]: E0122 17:05:46.412276 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0330603b-2b77-4011-a1e7-ff93c8c9c36b" containerName="keystone-cron" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.412294 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="0330603b-2b77-4011-a1e7-ff93c8c9c36b" containerName="keystone-cron" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.412521 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="0330603b-2b77-4011-a1e7-ff93c8c9c36b" containerName="keystone-cron" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.414210 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.427313 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.535751 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.536290 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqn78\" (UniqueName: \"kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.536988 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.639080 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqn78\" (UniqueName: \"kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.639325 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " 
pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.639440 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.640395 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.640573 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.664527 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqn78\" (UniqueName: \"kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78\") pod \"redhat-operators-66ntp\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:46 crc kubenswrapper[5032]: I0122 17:05:46.739798 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:05:47 crc kubenswrapper[5032]: I0122 17:05:47.216107 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:05:47 crc kubenswrapper[5032]: I0122 17:05:47.688508 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerStarted","Data":"ab1e029c0aa60b5bb7f1f8b1b0cdb4b11219f9e4c38b631ade03cbf360fe940d"} Jan 22 17:05:55 crc kubenswrapper[5032]: I0122 17:05:55.734198 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 22 17:05:57 crc kubenswrapper[5032]: I0122 17:05:57.495274 5032 generic.go:334] "Generic (PLEG): container finished" podID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerID="392346e95511f9e773c8a4027153ea1358a1abe20ee1967ad00faf07032136e6" exitCode=-1 Jan 22 17:05:57 crc kubenswrapper[5032]: I0122 17:05:57.495384 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e831a62a-4003-41ec-b3ac-ba0d29a91d95","Type":"ContainerDied","Data":"392346e95511f9e773c8a4027153ea1358a1abe20ee1967ad00faf07032136e6"} Jan 22 17:06:10 crc kubenswrapper[5032]: I0122 17:06:10.341594 5032 generic.go:334] "Generic (PLEG): container finished" podID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerID="867e3e7ca9ef86730c2cf37ce0362ea03d5fb5cad1cd93017b481be88b9b3fc7" exitCode=-1 Jan 22 17:06:10 crc kubenswrapper[5032]: I0122 17:06:10.341669 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerDied","Data":"867e3e7ca9ef86730c2cf37ce0362ea03d5fb5cad1cd93017b481be88b9b3fc7"} Jan 22 17:06:20 crc kubenswrapper[5032]: I0122 17:06:20.808812 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-notification-agent" probeResult="failure" output=< Jan 22 17:06:20 crc kubenswrapper[5032]: Unkown error: Expecting value: line 1 column 1 (char 0) Jan 22 17:06:20 crc kubenswrapper[5032]: > Jan 22 17:06:20 crc kubenswrapper[5032]: I0122 17:06:20.809499 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Jan 22 17:06:22 crc kubenswrapper[5032]: I0122 17:06:22.388099 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" podUID="6a0ca471-23dc-4eac-9c3a-0adfc1c0a003" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.78:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 17:06:27 crc kubenswrapper[5032]: I0122 17:06:27.387152 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-skzfx" podUID="6a0ca471-23dc-4eac-9c3a-0adfc1c0a003" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.78:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 17:06:32 crc kubenswrapper[5032]: E0122 17:06:32.510031 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:06:32 crc kubenswrapper[5032]: I0122 17:06:32.534926 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:06:32 crc kubenswrapper[5032]: I0122 17:06:32.592693 5032 generic.go:334] "Generic (PLEG): container finished" podID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerID="8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58" exitCode=0 Jan 22 17:06:32 crc kubenswrapper[5032]: I0122 17:06:32.592851 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerDied","Data":"8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58"} Jan 22 17:06:32 crc kubenswrapper[5032]: I0122 17:06:32.600727 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:06:32 crc kubenswrapper[5032]: E0122 17:06:32.601069 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:06:33 crc kubenswrapper[5032]: I0122 17:06:33.613731 5032 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e831a62a-4003-41ec-b3ac-ba0d29a91d95","Type":"ContainerStarted","Data":"c185cf502cef569982e24b60c276f8495158147f041f546705a6e6091febfe23"} Jan 22 17:06:33 crc kubenswrapper[5032]: I0122 17:06:33.619388 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"16341ffc7b1ca6cf7093d31356a61b0820fac41214d927947f3e87d64494584d"} Jan 22 17:06:33 crc kubenswrapper[5032]: I0122 17:06:33.620067 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-notification-agent" containerStatusID={"Type":"cri-o","ID":"e9b5756d752a24e4b261161f3ace04c3ef419cd353590da53b00b25061410f56"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-notification-agent failed liveness probe, will be restarted" Jan 22 17:06:33 crc kubenswrapper[5032]: I0122 17:06:33.620167 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-notification-agent" containerID="cri-o://e9b5756d752a24e4b261161f3ace04c3ef419cd353590da53b00b25061410f56" gracePeriod=30 Jan 22 17:06:33 crc kubenswrapper[5032]: I0122 17:06:33.625245 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerStarted","Data":"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5"} Jan 22 17:06:36 crc kubenswrapper[5032]: I0122 17:06:36.659019 5032 generic.go:334] "Generic (PLEG): container finished" podID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerID="d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5" exitCode=0 Jan 22 17:06:36 crc kubenswrapper[5032]: I0122 17:06:36.659094 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerDied","Data":"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5"} Jan 22 17:06:37 crc kubenswrapper[5032]: I0122 17:06:37.408409 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 17:06:37 crc kubenswrapper[5032]: I0122 17:06:37.671323 5032 generic.go:334] "Generic (PLEG): container finished" podID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerID="e9b5756d752a24e4b261161f3ace04c3ef419cd353590da53b00b25061410f56" exitCode=0 Jan 22 17:06:37 crc kubenswrapper[5032]: I0122 17:06:37.671399 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerDied","Data":"e9b5756d752a24e4b261161f3ace04c3ef419cd353590da53b00b25061410f56"} Jan 22 17:06:37 crc kubenswrapper[5032]: I0122 17:06:37.673944 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerStarted","Data":"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb"} Jan 22 17:06:37 crc kubenswrapper[5032]: I0122 17:06:37.702145 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-66ntp" podStartSLOduration=47.125938887 podStartE2EDuration="51.702128678s" podCreationTimestamp="2026-01-22 17:05:46 +0000 UTC" firstStartedPulling="2026-01-22 
17:06:32.594783941 +0000 UTC m=+3696.406952982" lastFinishedPulling="2026-01-22 17:06:37.170973712 +0000 UTC m=+3700.983142773" observedRunningTime="2026-01-22 17:06:37.690435357 +0000 UTC m=+3701.502604398" watchObservedRunningTime="2026-01-22 17:06:37.702128678 +0000 UTC m=+3701.514297709" Jan 22 17:06:38 crc kubenswrapper[5032]: I0122 17:06:38.696004 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89f96057-2096-47e7-88c3-bdeab76e4fe6","Type":"ContainerStarted","Data":"9148d3dca54df0ad3d3ac3695f80cef0b2cc2460cc1ede3e91aabdfb7731ed3c"} Jan 22 17:06:42 crc kubenswrapper[5032]: I0122 17:06:42.418040 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 17:06:46 crc kubenswrapper[5032]: I0122 17:06:46.740881 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:46 crc kubenswrapper[5032]: I0122 17:06:46.741374 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:46 crc kubenswrapper[5032]: I0122 17:06:46.825199 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:46 crc kubenswrapper[5032]: I0122 17:06:46.892435 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:47 crc kubenswrapper[5032]: I0122 17:06:47.455127 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:06:47 crc kubenswrapper[5032]: E0122 17:06:47.455361 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:06:47 crc kubenswrapper[5032]: I0122 17:06:47.633061 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:06:48 crc kubenswrapper[5032]: I0122 17:06:48.811555 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-66ntp" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="registry-server" containerID="cri-o://7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb" gracePeriod=2 Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.339380 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.401081 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities\") pod \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.401544 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content\") pod \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.401600 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqn78\" (UniqueName: \"kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78\") pod \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\" (UID: \"1a8f6dee-690f-41aa-a69d-abbd50390aa1\") " Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.402038 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities" (OuterVolumeSpecName: "utilities") pod "1a8f6dee-690f-41aa-a69d-abbd50390aa1" (UID: "1a8f6dee-690f-41aa-a69d-abbd50390aa1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.421506 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78" (OuterVolumeSpecName: "kube-api-access-dqn78") pod "1a8f6dee-690f-41aa-a69d-abbd50390aa1" (UID: "1a8f6dee-690f-41aa-a69d-abbd50390aa1"). InnerVolumeSpecName "kube-api-access-dqn78". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.503532 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqn78\" (UniqueName: \"kubernetes.io/projected/1a8f6dee-690f-41aa-a69d-abbd50390aa1-kube-api-access-dqn78\") on node \"crc\" DevicePath \"\"" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.503706 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.548298 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a8f6dee-690f-41aa-a69d-abbd50390aa1" (UID: "1a8f6dee-690f-41aa-a69d-abbd50390aa1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.605318 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8f6dee-690f-41aa-a69d-abbd50390aa1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.824014 5032 generic.go:334] "Generic (PLEG): container finished" podID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerID="7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb" exitCode=0 Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.824066 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerDied","Data":"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb"} Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.824088 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66ntp" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.824107 5032 scope.go:117] "RemoveContainer" containerID="7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.824095 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66ntp" event={"ID":"1a8f6dee-690f-41aa-a69d-abbd50390aa1","Type":"ContainerDied","Data":"ab1e029c0aa60b5bb7f1f8b1b0cdb4b11219f9e4c38b631ade03cbf360fe940d"} Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.859216 5032 scope.go:117] "RemoveContainer" containerID="d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.872731 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.887230 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-66ntp"] Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.887566 5032 scope.go:117] "RemoveContainer" containerID="8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.944888 5032 scope.go:117] "RemoveContainer" containerID="7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb" Jan 22 17:06:49 crc kubenswrapper[5032]: E0122 17:06:49.945448 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb\": container with ID starting with 7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb not found: ID does not exist" containerID="7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.945575 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb"} err="failed to get container status \"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb\": rpc error: code = NotFound desc = could not find container \"7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb\": container with ID starting with 7cb5df88c7bdba08e0430217d5c8d04b81b89ef78d1ce52556ba9337931aa0eb not found: ID does not exist" Jan 22 17:06:49 crc 
kubenswrapper[5032]: I0122 17:06:49.945659 5032 scope.go:117] "RemoveContainer" containerID="d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5" Jan 22 17:06:49 crc kubenswrapper[5032]: E0122 17:06:49.946057 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5\": container with ID starting with d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5 not found: ID does not exist" containerID="d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.946152 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5"} err="failed to get container status \"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5\": rpc error: code = NotFound desc = could not find container \"d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5\": container with ID starting with d5122c823ac04404db140dbf4ba3a403304e8b816c8ce0d07193855e825003f5 not found: ID does not exist" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.946236 5032 scope.go:117] "RemoveContainer" containerID="8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58" Jan 22 17:06:49 crc kubenswrapper[5032]: E0122 17:06:49.946591 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58\": container with ID starting with 8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58 not found: ID does not exist" containerID="8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58" Jan 22 17:06:49 crc kubenswrapper[5032]: I0122 17:06:49.946669 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58"} err="failed to get container status \"8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58\": rpc error: code = NotFound desc = could not find container \"8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58\": container with ID starting with 8988606115c597ef48cc4a3a3b4f863a67a75a68357e32ae2e8e19ef75f40c58 not found: ID does not exist" Jan 22 17:06:50 crc kubenswrapper[5032]: I0122 17:06:50.466070 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" path="/var/lib/kubelet/pods/1a8f6dee-690f-41aa-a69d-abbd50390aa1/volumes" Jan 22 17:06:58 crc kubenswrapper[5032]: I0122 17:06:58.456182 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:06:58 crc kubenswrapper[5032]: E0122 17:06:58.457324 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:07:11 crc kubenswrapper[5032]: I0122 17:07:11.455494 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" 
Jan 22 17:07:11 crc kubenswrapper[5032]: E0122 17:07:11.456478 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:07:24 crc kubenswrapper[5032]: I0122 17:07:24.454228 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:07:24 crc kubenswrapper[5032]: E0122 17:07:24.454966 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:07:36 crc kubenswrapper[5032]: I0122 17:07:36.469170 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:07:36 crc kubenswrapper[5032]: E0122 17:07:36.469973 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:07:51 crc kubenswrapper[5032]: I0122 17:07:51.455483 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:07:51 crc kubenswrapper[5032]: E0122 17:07:51.456811 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:02 crc kubenswrapper[5032]: I0122 17:08:02.455059 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:08:02 crc kubenswrapper[5032]: E0122 17:08:02.455904 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:15 crc kubenswrapper[5032]: I0122 17:08:15.455378 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:08:15 crc kubenswrapper[5032]: E0122 17:08:15.456488 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:29 crc kubenswrapper[5032]: I0122 17:08:29.455653 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:08:29 crc kubenswrapper[5032]: E0122 17:08:29.456398 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:42 crc kubenswrapper[5032]: I0122 17:08:42.455841 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:08:42 crc kubenswrapper[5032]: E0122 17:08:42.457167 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.233364 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:08:51 crc kubenswrapper[5032]: E0122 17:08:51.234297 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="extract-utilities" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.234314 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="extract-utilities" Jan 22 17:08:51 crc kubenswrapper[5032]: E0122 17:08:51.234329 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="extract-content" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.234336 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="extract-content" Jan 22 17:08:51 crc kubenswrapper[5032]: E0122 17:08:51.234358 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="registry-server" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.234366 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="registry-server" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.234622 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a8f6dee-690f-41aa-a69d-abbd50390aa1" containerName="registry-server" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.236162 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.261209 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.373889 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.374265 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.374457 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zt7n\" (UniqueName: \"kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.475767 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.475873 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.475919 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zt7n\" (UniqueName: \"kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.476633 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:51 crc kubenswrapper[5032]: I0122 17:08:51.476851 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:52 crc kubenswrapper[5032]: I0122 17:08:52.033326 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5zt7n\" (UniqueName: \"kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n\") pod \"redhat-marketplace-bxcs9\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:52 crc kubenswrapper[5032]: I0122 17:08:52.154539 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:08:52 crc kubenswrapper[5032]: I0122 17:08:52.748079 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:08:52 crc kubenswrapper[5032]: W0122 17:08:52.749199 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9ccea65_3cc2_4748_83cd_f4b1ee7d7770.slice/crio-ecd5a67beca3c9876138918088dc83fe153e21829c228e419cff21caac685bf5 WatchSource:0}: Error finding container ecd5a67beca3c9876138918088dc83fe153e21829c228e419cff21caac685bf5: Status 404 returned error can't find the container with id ecd5a67beca3c9876138918088dc83fe153e21829c228e419cff21caac685bf5 Jan 22 17:08:53 crc kubenswrapper[5032]: I0122 17:08:53.123822 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerID="c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6" exitCode=0 Jan 22 17:08:53 crc kubenswrapper[5032]: I0122 17:08:53.124782 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerDied","Data":"c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6"} Jan 22 17:08:53 crc kubenswrapper[5032]: I0122 17:08:53.124974 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerStarted","Data":"ecd5a67beca3c9876138918088dc83fe153e21829c228e419cff21caac685bf5"} Jan 22 17:08:53 crc kubenswrapper[5032]: I0122 17:08:53.456363 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:08:53 crc kubenswrapper[5032]: E0122 17:08:53.456613 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:08:55 crc kubenswrapper[5032]: I0122 17:08:55.144709 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerID="d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44" exitCode=0 Jan 22 17:08:55 crc kubenswrapper[5032]: I0122 17:08:55.144836 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerDied","Data":"d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44"} Jan 22 17:08:56 crc kubenswrapper[5032]: I0122 17:08:56.156170 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" 
event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerStarted","Data":"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30"} Jan 22 17:08:56 crc kubenswrapper[5032]: I0122 17:08:56.186702 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bxcs9" podStartSLOduration=2.681467891 podStartE2EDuration="5.186677599s" podCreationTimestamp="2026-01-22 17:08:51 +0000 UTC" firstStartedPulling="2026-01-22 17:08:53.129926951 +0000 UTC m=+3836.942096002" lastFinishedPulling="2026-01-22 17:08:55.635136649 +0000 UTC m=+3839.447305710" observedRunningTime="2026-01-22 17:08:56.171565106 +0000 UTC m=+3839.983734227" watchObservedRunningTime="2026-01-22 17:08:56.186677599 +0000 UTC m=+3839.998846630" Jan 22 17:09:02 crc kubenswrapper[5032]: I0122 17:09:02.154771 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:02 crc kubenswrapper[5032]: I0122 17:09:02.155995 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:02 crc kubenswrapper[5032]: I0122 17:09:02.253765 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:03 crc kubenswrapper[5032]: I0122 17:09:03.294389 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:03 crc kubenswrapper[5032]: I0122 17:09:03.354000 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:09:04 crc kubenswrapper[5032]: I0122 17:09:04.455762 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:09:04 crc kubenswrapper[5032]: E0122 17:09:04.457307 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.249400 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bxcs9" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="registry-server" containerID="cri-o://604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30" gracePeriod=2 Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.868109 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.986343 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities\") pod \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.986432 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zt7n\" (UniqueName: \"kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n\") pod \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.986622 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content\") pod \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\" (UID: \"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770\") " Jan 22 17:09:05 crc kubenswrapper[5032]: I0122 17:09:05.991316 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities" (OuterVolumeSpecName: "utilities") pod "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" (UID: "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.022473 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n" (OuterVolumeSpecName: "kube-api-access-5zt7n") pod "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" (UID: "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770"). InnerVolumeSpecName "kube-api-access-5zt7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.040069 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" (UID: "c9ccea65-3cc2-4748-83cd-f4b1ee7d7770"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.089735 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.089779 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zt7n\" (UniqueName: \"kubernetes.io/projected/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-kube-api-access-5zt7n\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.089794 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.259434 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerID="604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30" exitCode=0 Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.259487 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerDied","Data":"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30"} Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.259554 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bxcs9" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.259801 5032 scope.go:117] "RemoveContainer" containerID="604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.259783 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bxcs9" event={"ID":"c9ccea65-3cc2-4748-83cd-f4b1ee7d7770","Type":"ContainerDied","Data":"ecd5a67beca3c9876138918088dc83fe153e21829c228e419cff21caac685bf5"} Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.299525 5032 scope.go:117] "RemoveContainer" containerID="d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.310399 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.323084 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bxcs9"] Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.331045 5032 scope.go:117] "RemoveContainer" containerID="c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.377503 5032 scope.go:117] "RemoveContainer" containerID="604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30" Jan 22 17:09:06 crc kubenswrapper[5032]: E0122 17:09:06.377961 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30\": container with ID starting with 604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30 not found: ID does not exist" containerID="604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.378010 5032 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30"} err="failed to get container status \"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30\": rpc error: code = NotFound desc = could not find container \"604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30\": container with ID starting with 604de00e6ef602eac47170a7d773c6da96065fd600dd688bacf8412963e25d30 not found: ID does not exist" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.378042 5032 scope.go:117] "RemoveContainer" containerID="d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44" Jan 22 17:09:06 crc kubenswrapper[5032]: E0122 17:09:06.378442 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44\": container with ID starting with d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44 not found: ID does not exist" containerID="d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.378494 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44"} err="failed to get container status \"d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44\": rpc error: code = NotFound desc = could not find container \"d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44\": container with ID starting with d4ffac9e47ace4fd3f8793eb4d1afa1cc787ce85826eb69a48f21e2f0cff6e44 not found: ID does not exist" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.378530 5032 scope.go:117] "RemoveContainer" containerID="c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6" Jan 22 17:09:06 crc kubenswrapper[5032]: E0122 17:09:06.378891 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6\": container with ID starting with c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6 not found: ID does not exist" containerID="c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.378932 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6"} err="failed to get container status \"c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6\": rpc error: code = NotFound desc = could not find container \"c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6\": container with ID starting with c83e1615e6d1d89abe1a0fc275b3f3408f82f243acffe6d22cafd3a7c1e666c6 not found: ID does not exist" Jan 22 17:09:06 crc kubenswrapper[5032]: I0122 17:09:06.475498 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" path="/var/lib/kubelet/pods/c9ccea65-3cc2-4748-83cd-f4b1ee7d7770/volumes" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.123273 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:08 crc kubenswrapper[5032]: E0122 17:09:08.123990 5032 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="extract-utilities" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.124005 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="extract-utilities" Jan 22 17:09:08 crc kubenswrapper[5032]: E0122 17:09:08.124021 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="extract-content" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.124029 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="extract-content" Jan 22 17:09:08 crc kubenswrapper[5032]: E0122 17:09:08.124058 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="registry-server" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.124068 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="registry-server" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.124294 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ccea65-3cc2-4748-83cd-f4b1ee7d7770" containerName="registry-server" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.125961 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.136283 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.236815 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.236922 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztxjm\" (UniqueName: \"kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.237064 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.338265 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.338337 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztxjm\" (UniqueName: \"kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm\") pod 
\"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.338420 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.338938 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.338956 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.364608 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztxjm\" (UniqueName: \"kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm\") pod \"community-operators-j5qks\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.446128 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:08 crc kubenswrapper[5032]: I0122 17:09:08.944547 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:09 crc kubenswrapper[5032]: I0122 17:09:09.291197 5032 generic.go:334] "Generic (PLEG): container finished" podID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerID="887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00" exitCode=0 Jan 22 17:09:09 crc kubenswrapper[5032]: I0122 17:09:09.291314 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerDied","Data":"887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00"} Jan 22 17:09:09 crc kubenswrapper[5032]: I0122 17:09:09.291543 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerStarted","Data":"000009c109efa5d02b301b3e2c2396ac53a45d0737d09803342a4ff30a24573c"} Jan 22 17:09:10 crc kubenswrapper[5032]: I0122 17:09:10.305503 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerStarted","Data":"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80"} Jan 22 17:09:11 crc kubenswrapper[5032]: I0122 17:09:11.321259 5032 generic.go:334] "Generic (PLEG): container finished" podID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerID="433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80" exitCode=0 Jan 22 17:09:11 crc kubenswrapper[5032]: I0122 17:09:11.321485 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerDied","Data":"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80"} Jan 22 17:09:12 crc kubenswrapper[5032]: I0122 17:09:12.333809 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerStarted","Data":"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a"} Jan 22 17:09:12 crc kubenswrapper[5032]: I0122 17:09:12.355821 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j5qks" podStartSLOduration=1.936289186 podStartE2EDuration="4.355799249s" podCreationTimestamp="2026-01-22 17:09:08 +0000 UTC" firstStartedPulling="2026-01-22 17:09:09.293166065 +0000 UTC m=+3853.105335096" lastFinishedPulling="2026-01-22 17:09:11.712676098 +0000 UTC m=+3855.524845159" observedRunningTime="2026-01-22 17:09:12.352959373 +0000 UTC m=+3856.165128424" watchObservedRunningTime="2026-01-22 17:09:12.355799249 +0000 UTC m=+3856.167968290" Jan 22 17:09:17 crc kubenswrapper[5032]: I0122 17:09:17.454614 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:09:17 crc kubenswrapper[5032]: E0122 17:09:17.455422 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:09:18 crc kubenswrapper[5032]: I0122 17:09:18.446481 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:18 crc kubenswrapper[5032]: I0122 17:09:18.446771 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:18 crc kubenswrapper[5032]: I0122 17:09:18.506060 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:19 crc kubenswrapper[5032]: I0122 17:09:19.464926 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:19 crc kubenswrapper[5032]: I0122 17:09:19.534453 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:21 crc kubenswrapper[5032]: I0122 17:09:21.429936 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j5qks" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="registry-server" containerID="cri-o://cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a" gracePeriod=2 Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.023620 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.131658 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content\") pod \"d403b2a2-28ef-4f25-b148-c41b46132a89\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.131769 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztxjm\" (UniqueName: \"kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm\") pod \"d403b2a2-28ef-4f25-b148-c41b46132a89\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.131822 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities\") pod \"d403b2a2-28ef-4f25-b148-c41b46132a89\" (UID: \"d403b2a2-28ef-4f25-b148-c41b46132a89\") " Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.134054 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities" (OuterVolumeSpecName: "utilities") pod "d403b2a2-28ef-4f25-b148-c41b46132a89" (UID: "d403b2a2-28ef-4f25-b148-c41b46132a89"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.138037 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm" (OuterVolumeSpecName: "kube-api-access-ztxjm") pod "d403b2a2-28ef-4f25-b148-c41b46132a89" (UID: "d403b2a2-28ef-4f25-b148-c41b46132a89"). InnerVolumeSpecName "kube-api-access-ztxjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.200279 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d403b2a2-28ef-4f25-b148-c41b46132a89" (UID: "d403b2a2-28ef-4f25-b148-c41b46132a89"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.233948 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.233992 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztxjm\" (UniqueName: \"kubernetes.io/projected/d403b2a2-28ef-4f25-b148-c41b46132a89-kube-api-access-ztxjm\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.234008 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d403b2a2-28ef-4f25-b148-c41b46132a89-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.442262 5032 generic.go:334] "Generic (PLEG): container finished" podID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerID="cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a" exitCode=0 Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.442309 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerDied","Data":"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a"} Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.442337 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5qks" event={"ID":"d403b2a2-28ef-4f25-b148-c41b46132a89","Type":"ContainerDied","Data":"000009c109efa5d02b301b3e2c2396ac53a45d0737d09803342a4ff30a24573c"} Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.442358 5032 scope.go:117] "RemoveContainer" containerID="cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.442497 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j5qks" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.474262 5032 scope.go:117] "RemoveContainer" containerID="433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.493703 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.503787 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j5qks"] Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.504724 5032 scope.go:117] "RemoveContainer" containerID="887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.542759 5032 scope.go:117] "RemoveContainer" containerID="cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a" Jan 22 17:09:22 crc kubenswrapper[5032]: E0122 17:09:22.543588 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a\": container with ID starting with cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a not found: ID does not exist" containerID="cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.543626 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a"} err="failed to get container status \"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a\": rpc error: code = NotFound desc = could not find container \"cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a\": container with ID starting with cbd785b071cdf7d36d13785d680941e55bbdae555cdd03612e8ec8a007e8a98a not found: ID does not exist" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.543655 5032 scope.go:117] "RemoveContainer" containerID="433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80" Jan 22 17:09:22 crc kubenswrapper[5032]: E0122 17:09:22.544028 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80\": container with ID starting with 433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80 not found: ID does not exist" containerID="433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.544057 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80"} err="failed to get container status \"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80\": rpc error: code = NotFound desc = could not find container \"433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80\": container with ID starting with 433487290eed0c4d933d8f0d0e68e28023a96387d0a2b323d996507681427d80 not found: ID does not exist" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.544076 5032 scope.go:117] "RemoveContainer" containerID="887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00" Jan 22 17:09:22 crc kubenswrapper[5032]: E0122 17:09:22.544349 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00\": container with ID starting with 887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00 not found: ID does not exist" containerID="887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00" Jan 22 17:09:22 crc kubenswrapper[5032]: I0122 17:09:22.544375 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00"} err="failed to get container status \"887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00\": rpc error: code = NotFound desc = could not find container \"887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00\": container with ID starting with 887e819f1d66e1e919865120acc7dfb0d16f43c2721320bad27b04b8f1432a00 not found: ID does not exist" Jan 22 17:09:24 crc kubenswrapper[5032]: I0122 17:09:24.467254 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" path="/var/lib/kubelet/pods/d403b2a2-28ef-4f25-b148-c41b46132a89/volumes" Jan 22 17:09:29 crc kubenswrapper[5032]: I0122 17:09:29.455089 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:09:29 crc kubenswrapper[5032]: E0122 17:09:29.455995 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:09:44 crc kubenswrapper[5032]: I0122 17:09:44.455784 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:09:44 crc kubenswrapper[5032]: E0122 17:09:44.456744 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:09:59 crc kubenswrapper[5032]: I0122 17:09:59.454691 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:09:59 crc kubenswrapper[5032]: E0122 17:09:59.455477 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.455287 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:10:14 crc kubenswrapper[5032]: E0122 17:10:14.456169 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.790652 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:10:14 crc kubenswrapper[5032]: E0122 17:10:14.791163 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="extract-content" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.791188 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="extract-content" Jan 22 17:10:14 crc kubenswrapper[5032]: E0122 17:10:14.791220 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="registry-server" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.791230 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="registry-server" Jan 22 17:10:14 crc kubenswrapper[5032]: E0122 17:10:14.791253 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="extract-utilities" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.791262 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="extract-utilities" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.791481 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d403b2a2-28ef-4f25-b148-c41b46132a89" containerName="registry-server" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.793141 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.830529 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.890410 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.890482 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.890516 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmlr6\" (UniqueName: \"kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.992851 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.992972 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.993007 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmlr6\" (UniqueName: \"kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.993368 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:14 crc kubenswrapper[5032]: I0122 17:10:14.993423 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.016921 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kmlr6\" (UniqueName: \"kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6\") pod \"certified-operators-dxxqb\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.118074 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.673776 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.974455 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b438-91ae-489f-ba17-456445ad126a" containerID="ef7215a727fbcc0c3443d8d6eef4e3178abe10f233d1a847a4b1bea1dcc9bcc9" exitCode=0 Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.974725 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerDied","Data":"ef7215a727fbcc0c3443d8d6eef4e3178abe10f233d1a847a4b1bea1dcc9bcc9"} Jan 22 17:10:15 crc kubenswrapper[5032]: I0122 17:10:15.974756 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerStarted","Data":"ce7c6a629e2e04cad38cdaaeafef5ef7031e05020e3d440d5d4f38396087366e"} Jan 22 17:10:27 crc kubenswrapper[5032]: I0122 17:10:27.092695 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b438-91ae-489f-ba17-456445ad126a" containerID="092afb9cb50ca41bfad40468f0e3e4e729d35c9e47686fdc98a454603baf66da" exitCode=0 Jan 22 17:10:27 crc kubenswrapper[5032]: I0122 17:10:27.092778 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerDied","Data":"092afb9cb50ca41bfad40468f0e3e4e729d35c9e47686fdc98a454603baf66da"} Jan 22 17:10:28 crc kubenswrapper[5032]: I0122 17:10:28.108007 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerStarted","Data":"5191032f20fc5e555cfc958f43b0d38396214a88bb7108868ed5e58ce9f0260e"} Jan 22 17:10:28 crc kubenswrapper[5032]: I0122 17:10:28.134541 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dxxqb" podStartSLOduration=2.599271361 podStartE2EDuration="14.134520102s" podCreationTimestamp="2026-01-22 17:10:14 +0000 UTC" firstStartedPulling="2026-01-22 17:10:15.976495423 +0000 UTC m=+3919.788664454" lastFinishedPulling="2026-01-22 17:10:27.511744164 +0000 UTC m=+3931.323913195" observedRunningTime="2026-01-22 17:10:28.128121272 +0000 UTC m=+3931.940290343" watchObservedRunningTime="2026-01-22 17:10:28.134520102 +0000 UTC m=+3931.946689133" Jan 22 17:10:29 crc kubenswrapper[5032]: I0122 17:10:29.454576 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:10:29 crc kubenswrapper[5032]: E0122 17:10:29.456128 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.119391 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.120007 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.165422 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.221002 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.290059 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.406039 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.406299 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-76bkz" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="registry-server" containerID="cri-o://19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47" gracePeriod=2 Jan 22 17:10:35 crc kubenswrapper[5032]: I0122 17:10:35.993519 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.136904 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content\") pod \"38e9fac6-aa45-4767-8db6-1a53a6d35689\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.137154 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjrjg\" (UniqueName: \"kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg\") pod \"38e9fac6-aa45-4767-8db6-1a53a6d35689\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.137244 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities\") pod \"38e9fac6-aa45-4767-8db6-1a53a6d35689\" (UID: \"38e9fac6-aa45-4767-8db6-1a53a6d35689\") " Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.138139 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities" (OuterVolumeSpecName: "utilities") pod "38e9fac6-aa45-4767-8db6-1a53a6d35689" (UID: "38e9fac6-aa45-4767-8db6-1a53a6d35689"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.144121 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg" (OuterVolumeSpecName: "kube-api-access-vjrjg") pod "38e9fac6-aa45-4767-8db6-1a53a6d35689" (UID: "38e9fac6-aa45-4767-8db6-1a53a6d35689"). InnerVolumeSpecName "kube-api-access-vjrjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.185511 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38e9fac6-aa45-4767-8db6-1a53a6d35689" (UID: "38e9fac6-aa45-4767-8db6-1a53a6d35689"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.199428 5032 generic.go:334] "Generic (PLEG): container finished" podID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerID="19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47" exitCode=0 Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.199608 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerDied","Data":"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47"} Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.199667 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76bkz" event={"ID":"38e9fac6-aa45-4767-8db6-1a53a6d35689","Type":"ContainerDied","Data":"aaa2c6053029e51629d79839ebdca6977ab339ca7988a9a6411439ed28359cb9"} Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.199688 5032 scope.go:117] "RemoveContainer" containerID="19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.199630 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-76bkz" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.229445 5032 scope.go:117] "RemoveContainer" containerID="208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.241091 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.241324 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjrjg\" (UniqueName: \"kubernetes.io/projected/38e9fac6-aa45-4767-8db6-1a53a6d35689-kube-api-access-vjrjg\") on node \"crc\" DevicePath \"\"" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.241346 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38e9fac6-aa45-4767-8db6-1a53a6d35689-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.251380 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.258094 5032 scope.go:117] "RemoveContainer" containerID="087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.261354 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-76bkz"] Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.360073 5032 scope.go:117] "RemoveContainer" containerID="19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47" Jan 22 17:10:36 crc kubenswrapper[5032]: E0122 17:10:36.360841 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47\": container with ID starting with 19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47 not found: ID does not exist" containerID="19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.360898 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47"} err="failed to get container status \"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47\": rpc error: code = NotFound desc = could not find container \"19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47\": container with ID starting with 19e01d9d32f203aef140c99c2b0259a787731ac3707e9b5ce91460ed0c779c47 not found: ID does not exist" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.360931 5032 scope.go:117] "RemoveContainer" containerID="208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58" Jan 22 17:10:36 crc kubenswrapper[5032]: E0122 17:10:36.361348 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58\": container with ID starting with 208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58 not found: ID does not exist" containerID="208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.361373 5032 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58"} err="failed to get container status \"208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58\": rpc error: code = NotFound desc = could not find container \"208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58\": container with ID starting with 208713c603ea25f10b15cf3498780ad7ee27df65cb2fe959ec4af5835ad14c58 not found: ID does not exist" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.361388 5032 scope.go:117] "RemoveContainer" containerID="087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18" Jan 22 17:10:36 crc kubenswrapper[5032]: E0122 17:10:36.361679 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18\": container with ID starting with 087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18 not found: ID does not exist" containerID="087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.361701 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18"} err="failed to get container status \"087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18\": rpc error: code = NotFound desc = could not find container \"087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18\": container with ID starting with 087dad3538f1fb0b04d49053e5336ef3879b235989472a9a1db3bf25456a3e18 not found: ID does not exist" Jan 22 17:10:36 crc kubenswrapper[5032]: I0122 17:10:36.466905 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" path="/var/lib/kubelet/pods/38e9fac6-aa45-4767-8db6-1a53a6d35689/volumes" Jan 22 17:10:43 crc kubenswrapper[5032]: I0122 17:10:43.454463 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:10:43 crc kubenswrapper[5032]: E0122 17:10:43.456077 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:10:57 crc kubenswrapper[5032]: I0122 17:10:57.454910 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:10:57 crc kubenswrapper[5032]: E0122 17:10:57.455597 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:11:08 crc kubenswrapper[5032]: I0122 17:11:08.455377 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:11:08 crc 
kubenswrapper[5032]: E0122 17:11:08.456193 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:11:19 crc kubenswrapper[5032]: I0122 17:11:19.454616 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:11:19 crc kubenswrapper[5032]: E0122 17:11:19.455706 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:11:34 crc kubenswrapper[5032]: I0122 17:11:34.455230 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:11:34 crc kubenswrapper[5032]: I0122 17:11:34.789372 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8"} Jan 22 17:14:03 crc kubenswrapper[5032]: I0122 17:14:03.016572 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:14:03 crc kubenswrapper[5032]: I0122 17:14:03.017235 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:14:33 crc kubenswrapper[5032]: I0122 17:14:33.017421 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:14:33 crc kubenswrapper[5032]: I0122 17:14:33.018099 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.191014 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn"] Jan 22 17:15:00 crc kubenswrapper[5032]: E0122 17:15:00.193038 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="extract-utilities" 
Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.193131 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="extract-utilities" Jan 22 17:15:00 crc kubenswrapper[5032]: E0122 17:15:00.193225 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="extract-content" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.193285 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="extract-content" Jan 22 17:15:00 crc kubenswrapper[5032]: E0122 17:15:00.193366 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="registry-server" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.193415 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="registry-server" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.193647 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e9fac6-aa45-4767-8db6-1a53a6d35689" containerName="registry-server" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.194337 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.196191 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.196590 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.211751 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn"] Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.363154 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc2cq\" (UniqueName: \"kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.363826 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.364242 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.466196 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.466323 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.466381 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc2cq\" (UniqueName: \"kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.468589 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.473635 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.493227 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc2cq\" (UniqueName: \"kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq\") pod \"collect-profiles-29485035-tk4dn\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:00 crc kubenswrapper[5032]: I0122 17:15:00.536821 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:01 crc kubenswrapper[5032]: I0122 17:15:01.021912 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn"] Jan 22 17:15:01 crc kubenswrapper[5032]: W0122 17:15:01.028052 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6810aad1_56b6_42d7_a0b6_ea8757b6e8dd.slice/crio-def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2 WatchSource:0}: Error finding container def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2: Status 404 returned error can't find the container with id def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2 Jan 22 17:15:01 crc kubenswrapper[5032]: I0122 17:15:01.805851 5032 generic.go:334] "Generic (PLEG): container finished" podID="6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" containerID="4ce52aa1461ddf7871bef8a460035fafb21baa9e287ed3b799902e314584a2d8" exitCode=0 Jan 22 17:15:01 crc kubenswrapper[5032]: I0122 17:15:01.805983 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" event={"ID":"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd","Type":"ContainerDied","Data":"4ce52aa1461ddf7871bef8a460035fafb21baa9e287ed3b799902e314584a2d8"} Jan 22 17:15:01 crc kubenswrapper[5032]: I0122 17:15:01.806362 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" event={"ID":"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd","Type":"ContainerStarted","Data":"def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2"} Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.018488 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.018922 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.018986 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.020253 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.020341 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8" 
gracePeriod=600 Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.267229 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.426123 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume\") pod \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.426597 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc2cq\" (UniqueName: \"kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq\") pod \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.426721 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume\") pod \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\" (UID: \"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd\") " Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.426767 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume" (OuterVolumeSpecName: "config-volume") pod "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" (UID: "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.428064 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.435064 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq" (OuterVolumeSpecName: "kube-api-access-jc2cq") pod "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" (UID: "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd"). InnerVolumeSpecName "kube-api-access-jc2cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.435128 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" (UID: "6810aad1-56b6-42d7-a0b6-ea8757b6e8dd"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.530400 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc2cq\" (UniqueName: \"kubernetes.io/projected/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-kube-api-access-jc2cq\") on node \"crc\" DevicePath \"\"" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.530443 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.832468 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8" exitCode=0 Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.832555 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8"} Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.833107 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9"} Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.833149 5032 scope.go:117] "RemoveContainer" containerID="1c41dd8f2b0328475e2410e61a72a88a0b2222e7eb4e265fda272be31d803629" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.836434 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" event={"ID":"6810aad1-56b6-42d7-a0b6-ea8757b6e8dd","Type":"ContainerDied","Data":"def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2"} Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.836480 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="def51f24c9a9d908bb77dd1b2f3178615fa345d7ffd1da6a85d9ddec3c46bad2" Jan 22 17:15:03 crc kubenswrapper[5032]: I0122 17:15:03.836569 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn" Jan 22 17:15:04 crc kubenswrapper[5032]: I0122 17:15:04.355357 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r"] Jan 22 17:15:04 crc kubenswrapper[5032]: I0122 17:15:04.365499 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484990-rhb5r"] Jan 22 17:15:04 crc kubenswrapper[5032]: I0122 17:15:04.470552 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f62a5ce0-c3e7-4db0-b367-0ac223d92c72" path="/var/lib/kubelet/pods/f62a5ce0-c3e7-4db0-b367-0ac223d92c72/volumes" Jan 22 17:15:20 crc kubenswrapper[5032]: I0122 17:15:20.670677 5032 scope.go:117] "RemoveContainer" containerID="a623d89ad21f181856f7035dfc391e6de011d3ed6f667a5ae0c29e47dac0477f" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.204495 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:08 crc kubenswrapper[5032]: E0122 17:16:08.206061 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" containerName="collect-profiles" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.206096 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" containerName="collect-profiles" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.206580 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" containerName="collect-profiles" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.209386 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.216474 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.343455 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.344192 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.344220 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99qnr\" (UniqueName: \"kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.445811 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.445888 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.445917 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99qnr\" (UniqueName: \"kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.446310 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.446389 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.466541 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-99qnr\" (UniqueName: \"kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr\") pod \"redhat-operators-f68j2\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:08 crc kubenswrapper[5032]: I0122 17:16:08.558234 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:09 crc kubenswrapper[5032]: I0122 17:16:09.057916 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:09 crc kubenswrapper[5032]: I0122 17:16:09.469160 5032 generic.go:334] "Generic (PLEG): container finished" podID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerID="aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df" exitCode=0 Jan 22 17:16:09 crc kubenswrapper[5032]: I0122 17:16:09.469269 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerDied","Data":"aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df"} Jan 22 17:16:09 crc kubenswrapper[5032]: I0122 17:16:09.469444 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerStarted","Data":"09099525ef5efef27fede76e908c8c27b4c3f25d0de7e32b863588ed17e366c9"} Jan 22 17:16:09 crc kubenswrapper[5032]: I0122 17:16:09.471529 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:16:11 crc kubenswrapper[5032]: I0122 17:16:11.494474 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerStarted","Data":"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5"} Jan 22 17:16:14 crc kubenswrapper[5032]: I0122 17:16:14.532396 5032 generic.go:334] "Generic (PLEG): container finished" podID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerID="279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5" exitCode=0 Jan 22 17:16:14 crc kubenswrapper[5032]: I0122 17:16:14.532448 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerDied","Data":"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5"} Jan 22 17:16:16 crc kubenswrapper[5032]: I0122 17:16:16.549353 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerStarted","Data":"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96"} Jan 22 17:16:16 crc kubenswrapper[5032]: I0122 17:16:16.569923 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-f68j2" podStartSLOduration=2.638748059 podStartE2EDuration="8.56990759s" podCreationTimestamp="2026-01-22 17:16:08 +0000 UTC" firstStartedPulling="2026-01-22 17:16:09.471293797 +0000 UTC m=+4273.283462828" lastFinishedPulling="2026-01-22 17:16:15.402453328 +0000 UTC m=+4279.214622359" observedRunningTime="2026-01-22 17:16:16.566482129 +0000 UTC m=+4280.378651170" watchObservedRunningTime="2026-01-22 17:16:16.56990759 +0000 UTC m=+4280.382076621" Jan 22 17:16:18 crc 
kubenswrapper[5032]: I0122 17:16:18.559077 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:18 crc kubenswrapper[5032]: I0122 17:16:18.559476 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:19 crc kubenswrapper[5032]: I0122 17:16:19.613157 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f68j2" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" probeResult="failure" output=< Jan 22 17:16:19 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 17:16:19 crc kubenswrapper[5032]: > Jan 22 17:16:29 crc kubenswrapper[5032]: I0122 17:16:29.624387 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f68j2" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" probeResult="failure" output=< Jan 22 17:16:29 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 17:16:29 crc kubenswrapper[5032]: > Jan 22 17:16:38 crc kubenswrapper[5032]: I0122 17:16:38.639829 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:38 crc kubenswrapper[5032]: I0122 17:16:38.733734 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:39 crc kubenswrapper[5032]: I0122 17:16:39.391902 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:39 crc kubenswrapper[5032]: I0122 17:16:39.781786 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f68j2" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" containerID="cri-o://a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96" gracePeriod=2 Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.346077 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.523767 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99qnr\" (UniqueName: \"kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr\") pod \"cb798cc7-7a2b-4433-9398-6e6aa574407c\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.523960 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content\") pod \"cb798cc7-7a2b-4433-9398-6e6aa574407c\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.524010 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities\") pod \"cb798cc7-7a2b-4433-9398-6e6aa574407c\" (UID: \"cb798cc7-7a2b-4433-9398-6e6aa574407c\") " Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.524949 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities" (OuterVolumeSpecName: "utilities") pod "cb798cc7-7a2b-4433-9398-6e6aa574407c" (UID: "cb798cc7-7a2b-4433-9398-6e6aa574407c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.626729 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.680022 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb798cc7-7a2b-4433-9398-6e6aa574407c" (UID: "cb798cc7-7a2b-4433-9398-6e6aa574407c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.728366 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb798cc7-7a2b-4433-9398-6e6aa574407c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.792578 5032 generic.go:334] "Generic (PLEG): container finished" podID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerID="a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96" exitCode=0 Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.792622 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerDied","Data":"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96"} Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.792649 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f68j2" event={"ID":"cb798cc7-7a2b-4433-9398-6e6aa574407c","Type":"ContainerDied","Data":"09099525ef5efef27fede76e908c8c27b4c3f25d0de7e32b863588ed17e366c9"} Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.792668 5032 scope.go:117] "RemoveContainer" containerID="a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.795355 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f68j2" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.824058 5032 scope.go:117] "RemoveContainer" containerID="279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.941244 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr" (OuterVolumeSpecName: "kube-api-access-99qnr") pod "cb798cc7-7a2b-4433-9398-6e6aa574407c" (UID: "cb798cc7-7a2b-4433-9398-6e6aa574407c"). InnerVolumeSpecName "kube-api-access-99qnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:16:40 crc kubenswrapper[5032]: I0122 17:16:40.954765 5032 scope.go:117] "RemoveContainer" containerID="aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.033932 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99qnr\" (UniqueName: \"kubernetes.io/projected/cb798cc7-7a2b-4433-9398-6e6aa574407c-kube-api-access-99qnr\") on node \"crc\" DevicePath \"\"" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.279435 5032 scope.go:117] "RemoveContainer" containerID="a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96" Jan 22 17:16:41 crc kubenswrapper[5032]: E0122 17:16:41.280692 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96\": container with ID starting with a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96 not found: ID does not exist" containerID="a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.280799 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96"} err="failed to get container status \"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96\": rpc error: code = NotFound desc = could not find container \"a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96\": container with ID starting with a9153771b0b37ac5c5cc3ef287fa0fb444a44af7d41c1a9c08f5d56a30dd0b96 not found: ID does not exist" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.280905 5032 scope.go:117] "RemoveContainer" containerID="279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5" Jan 22 17:16:41 crc kubenswrapper[5032]: E0122 17:16:41.281532 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5\": container with ID starting with 279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5 not found: ID does not exist" containerID="279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.281577 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5"} err="failed to get container status \"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5\": rpc error: code = NotFound desc = could not find container \"279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5\": container with ID starting with 279fd591f1cae739a2ff52e942144efe13542ce128a9af147759752dbfd626b5 not found: ID does not exist" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.281603 5032 scope.go:117] "RemoveContainer" containerID="aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df" Jan 22 17:16:41 crc kubenswrapper[5032]: E0122 17:16:41.284693 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df\": container with ID starting with aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df not found: ID does not 
exist" containerID="aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.284721 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df"} err="failed to get container status \"aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df\": rpc error: code = NotFound desc = could not find container \"aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df\": container with ID starting with aee4b354fbac95f1c1eb4bc47eea41b602c81a388840a2fb523fa5edb440a2df not found: ID does not exist" Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.323202 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:41 crc kubenswrapper[5032]: I0122 17:16:41.337476 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f68j2"] Jan 22 17:16:42 crc kubenswrapper[5032]: I0122 17:16:42.474547 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" path="/var/lib/kubelet/pods/cb798cc7-7a2b-4433-9398-6e6aa574407c/volumes" Jan 22 17:17:03 crc kubenswrapper[5032]: I0122 17:17:03.017148 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:17:03 crc kubenswrapper[5032]: I0122 17:17:03.018271 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:17:33 crc kubenswrapper[5032]: I0122 17:17:33.016477 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:17:33 crc kubenswrapper[5032]: I0122 17:17:33.017144 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.016723 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.017311 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:18:03 crc kubenswrapper[5032]: 
I0122 17:18:03.017362 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.018158 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.018238 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" gracePeriod=600 Jan 22 17:18:03 crc kubenswrapper[5032]: E0122 17:18:03.584412 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.630765 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" exitCode=0 Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.630832 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9"} Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.630919 5032 scope.go:117] "RemoveContainer" containerID="c9f7cfd9e8804d35f2e331c5fc47a37c0c6fecaef63932fc83da888c4907f3b8" Jan 22 17:18:03 crc kubenswrapper[5032]: I0122 17:18:03.631555 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:18:03 crc kubenswrapper[5032]: E0122 17:18:03.632023 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:18:18 crc kubenswrapper[5032]: I0122 17:18:18.455747 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:18:18 crc kubenswrapper[5032]: E0122 17:18:18.456824 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:18:30 crc kubenswrapper[5032]: I0122 17:18:30.455350 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:18:30 crc kubenswrapper[5032]: E0122 17:18:30.456145 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:18:44 crc kubenswrapper[5032]: I0122 17:18:44.454977 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:18:44 crc kubenswrapper[5032]: E0122 17:18:44.455924 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:18:58 crc kubenswrapper[5032]: I0122 17:18:58.454440 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:18:58 crc kubenswrapper[5032]: E0122 17:18:58.455293 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.779610 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:12 crc kubenswrapper[5032]: E0122 17:19:12.780839 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="extract-utilities" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.780887 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="extract-utilities" Jan 22 17:19:12 crc kubenswrapper[5032]: E0122 17:19:12.780906 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.780918 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" Jan 22 17:19:12 crc kubenswrapper[5032]: E0122 17:19:12.780940 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="extract-content" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.780953 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="extract-content" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.781332 5032 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cb798cc7-7a2b-4433-9398-6e6aa574407c" containerName="registry-server" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.783673 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.793369 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.924033 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.924190 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw5kr\" (UniqueName: \"kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:12 crc kubenswrapper[5032]: I0122 17:19:12.924439 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.026700 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.026794 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw5kr\" (UniqueName: \"kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.026897 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.027532 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.027601 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities\") pod \"redhat-marketplace-rqxmd\" (UID: 
\"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.454650 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:19:13 crc kubenswrapper[5032]: E0122 17:19:13.455352 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.529814 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw5kr\" (UniqueName: \"kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr\") pod \"redhat-marketplace-rqxmd\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:13 crc kubenswrapper[5032]: I0122 17:19:13.753626 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:14 crc kubenswrapper[5032]: I0122 17:19:14.211297 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:14 crc kubenswrapper[5032]: I0122 17:19:14.360133 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerStarted","Data":"855a7e32f96e942884299204425c48d24fedfc829755f086eaedd215e6aa37d1"} Jan 22 17:19:15 crc kubenswrapper[5032]: I0122 17:19:15.369473 5032 generic.go:334] "Generic (PLEG): container finished" podID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerID="cf6cc8356d3e751106b968aa5f25665ade2089bbbf01508af4c07c143285691a" exitCode=0 Jan 22 17:19:15 crc kubenswrapper[5032]: I0122 17:19:15.369523 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerDied","Data":"cf6cc8356d3e751106b968aa5f25665ade2089bbbf01508af4c07c143285691a"} Jan 22 17:19:16 crc kubenswrapper[5032]: I0122 17:19:16.408064 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerStarted","Data":"63fad90aa7186b48322cc3fb09f580c151070782d3907c71d2a5694aa224c685"} Jan 22 17:19:17 crc kubenswrapper[5032]: I0122 17:19:17.417838 5032 generic.go:334] "Generic (PLEG): container finished" podID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerID="63fad90aa7186b48322cc3fb09f580c151070782d3907c71d2a5694aa224c685" exitCode=0 Jan 22 17:19:17 crc kubenswrapper[5032]: I0122 17:19:17.417910 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerDied","Data":"63fad90aa7186b48322cc3fb09f580c151070782d3907c71d2a5694aa224c685"} Jan 22 17:19:19 crc kubenswrapper[5032]: I0122 17:19:19.445508 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" 
event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerStarted","Data":"ddabd94ea4406cf6806eb40bc9153f033a3a9dadd85bd6ef8509ae80e8c81548"} Jan 22 17:19:19 crc kubenswrapper[5032]: I0122 17:19:19.474506 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rqxmd" podStartSLOduration=4.612479368 podStartE2EDuration="7.474486412s" podCreationTimestamp="2026-01-22 17:19:12 +0000 UTC" firstStartedPulling="2026-01-22 17:19:15.371250602 +0000 UTC m=+4459.183419643" lastFinishedPulling="2026-01-22 17:19:18.233257616 +0000 UTC m=+4462.045426687" observedRunningTime="2026-01-22 17:19:19.4627979 +0000 UTC m=+4463.274966941" watchObservedRunningTime="2026-01-22 17:19:19.474486412 +0000 UTC m=+4463.286655443" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.141176 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.144503 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.156397 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.248164 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwfm2\" (UniqueName: \"kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.248494 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.248529 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.350820 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwfm2\" (UniqueName: \"kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.350902 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.350931 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.351437 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.351538 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.373062 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwfm2\" (UniqueName: \"kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2\") pod \"community-operators-vv8gq\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.482215 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.753870 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.754003 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:23 crc kubenswrapper[5032]: I0122 17:19:23.869218 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:24 crc kubenswrapper[5032]: I0122 17:19:24.269121 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:24 crc kubenswrapper[5032]: W0122 17:19:24.276206 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2797405e_81ab_4368_8298_6a232c327df2.slice/crio-b7db3ec0931449d3eebf6b131b3c60ac84295dd1230d68f4799e321d30890505 WatchSource:0}: Error finding container b7db3ec0931449d3eebf6b131b3c60ac84295dd1230d68f4799e321d30890505: Status 404 returned error can't find the container with id b7db3ec0931449d3eebf6b131b3c60ac84295dd1230d68f4799e321d30890505 Jan 22 17:19:24 crc kubenswrapper[5032]: I0122 17:19:24.498258 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerStarted","Data":"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad"} Jan 22 17:19:24 crc kubenswrapper[5032]: I0122 17:19:24.498690 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerStarted","Data":"b7db3ec0931449d3eebf6b131b3c60ac84295dd1230d68f4799e321d30890505"} Jan 22 17:19:24 crc kubenswrapper[5032]: I0122 
17:19:24.551948 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:25 crc kubenswrapper[5032]: I0122 17:19:25.455762 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:19:25 crc kubenswrapper[5032]: E0122 17:19:25.458680 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:19:25 crc kubenswrapper[5032]: I0122 17:19:25.518817 5032 generic.go:334] "Generic (PLEG): container finished" podID="2797405e-81ab-4368-8298-6a232c327df2" containerID="1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad" exitCode=0 Jan 22 17:19:25 crc kubenswrapper[5032]: I0122 17:19:25.520000 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerDied","Data":"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad"} Jan 22 17:19:27 crc kubenswrapper[5032]: I0122 17:19:27.537328 5032 generic.go:334] "Generic (PLEG): container finished" podID="2797405e-81ab-4368-8298-6a232c327df2" containerID="7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb" exitCode=0 Jan 22 17:19:27 crc kubenswrapper[5032]: I0122 17:19:27.537443 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerDied","Data":"7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb"} Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.337915 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.338231 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rqxmd" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="registry-server" containerID="cri-o://ddabd94ea4406cf6806eb40bc9153f033a3a9dadd85bd6ef8509ae80e8c81548" gracePeriod=2 Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.549511 5032 generic.go:334] "Generic (PLEG): container finished" podID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerID="ddabd94ea4406cf6806eb40bc9153f033a3a9dadd85bd6ef8509ae80e8c81548" exitCode=0 Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.549650 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerDied","Data":"ddabd94ea4406cf6806eb40bc9153f033a3a9dadd85bd6ef8509ae80e8c81548"} Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.797501 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.859892 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content\") pod \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.860002 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw5kr\" (UniqueName: \"kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr\") pod \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.860177 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities\") pod \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\" (UID: \"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12\") " Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.860984 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities" (OuterVolumeSpecName: "utilities") pod "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" (UID: "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.873581 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr" (OuterVolumeSpecName: "kube-api-access-tw5kr") pod "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" (UID: "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12"). InnerVolumeSpecName "kube-api-access-tw5kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.907042 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" (UID: "0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.962321 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.962366 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:28 crc kubenswrapper[5032]: I0122 17:19:28.962398 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw5kr\" (UniqueName: \"kubernetes.io/projected/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12-kube-api-access-tw5kr\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.566495 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqxmd" event={"ID":"0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12","Type":"ContainerDied","Data":"855a7e32f96e942884299204425c48d24fedfc829755f086eaedd215e6aa37d1"} Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.566606 5032 scope.go:117] "RemoveContainer" containerID="ddabd94ea4406cf6806eb40bc9153f033a3a9dadd85bd6ef8509ae80e8c81548" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.566624 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqxmd" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.579692 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerStarted","Data":"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d"} Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.594617 5032 scope.go:117] "RemoveContainer" containerID="63fad90aa7186b48322cc3fb09f580c151070782d3907c71d2a5694aa224c685" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.609524 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vv8gq" podStartSLOduration=3.859825957 podStartE2EDuration="6.60949987s" podCreationTimestamp="2026-01-22 17:19:23 +0000 UTC" firstStartedPulling="2026-01-22 17:19:25.522068322 +0000 UTC m=+4469.334237373" lastFinishedPulling="2026-01-22 17:19:28.271742215 +0000 UTC m=+4472.083911286" observedRunningTime="2026-01-22 17:19:29.606281314 +0000 UTC m=+4473.418450355" watchObservedRunningTime="2026-01-22 17:19:29.60949987 +0000 UTC m=+4473.421668901" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.645550 5032 scope.go:117] "RemoveContainer" containerID="cf6cc8356d3e751106b968aa5f25665ade2089bbbf01508af4c07c143285691a" Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.663733 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:29 crc kubenswrapper[5032]: I0122 17:19:29.673437 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqxmd"] Jan 22 17:19:29 crc kubenswrapper[5032]: E0122 17:19:29.809981 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ccb1f13_d4bf_4e43_8aa2_cdca1dc91d12.slice/crio-855a7e32f96e942884299204425c48d24fedfc829755f086eaedd215e6aa37d1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ccb1f13_d4bf_4e43_8aa2_cdca1dc91d12.slice\": RecentStats: unable to find data in memory cache]" Jan 22 17:19:30 crc kubenswrapper[5032]: I0122 17:19:30.466897 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" path="/var/lib/kubelet/pods/0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12/volumes" Jan 22 17:19:33 crc kubenswrapper[5032]: I0122 17:19:33.483303 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:33 crc kubenswrapper[5032]: I0122 17:19:33.485252 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:33 crc kubenswrapper[5032]: I0122 17:19:33.548141 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:33 crc kubenswrapper[5032]: I0122 17:19:33.704249 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:34 crc kubenswrapper[5032]: I0122 17:19:34.733069 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:36 crc kubenswrapper[5032]: I0122 17:19:36.468534 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:19:36 crc kubenswrapper[5032]: E0122 17:19:36.469318 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:19:36 crc kubenswrapper[5032]: I0122 17:19:36.658465 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vv8gq" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="registry-server" containerID="cri-o://c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d" gracePeriod=2 Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.265355 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.466379 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content\") pod \"2797405e-81ab-4368-8298-6a232c327df2\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.467202 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwfm2\" (UniqueName: \"kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2\") pod \"2797405e-81ab-4368-8298-6a232c327df2\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.467430 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities\") pod \"2797405e-81ab-4368-8298-6a232c327df2\" (UID: \"2797405e-81ab-4368-8298-6a232c327df2\") " Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.468078 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities" (OuterVolumeSpecName: "utilities") pod "2797405e-81ab-4368-8298-6a232c327df2" (UID: "2797405e-81ab-4368-8298-6a232c327df2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.469791 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.472714 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2" (OuterVolumeSpecName: "kube-api-access-zwfm2") pod "2797405e-81ab-4368-8298-6a232c327df2" (UID: "2797405e-81ab-4368-8298-6a232c327df2"). InnerVolumeSpecName "kube-api-access-zwfm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.546555 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2797405e-81ab-4368-8298-6a232c327df2" (UID: "2797405e-81ab-4368-8298-6a232c327df2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.571564 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwfm2\" (UniqueName: \"kubernetes.io/projected/2797405e-81ab-4368-8298-6a232c327df2-kube-api-access-zwfm2\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.571606 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2797405e-81ab-4368-8298-6a232c327df2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.668604 5032 generic.go:334] "Generic (PLEG): container finished" podID="2797405e-81ab-4368-8298-6a232c327df2" containerID="c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d" exitCode=0 Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.668651 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerDied","Data":"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d"} Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.668723 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vv8gq" event={"ID":"2797405e-81ab-4368-8298-6a232c327df2","Type":"ContainerDied","Data":"b7db3ec0931449d3eebf6b131b3c60ac84295dd1230d68f4799e321d30890505"} Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.668754 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vv8gq" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.668757 5032 scope.go:117] "RemoveContainer" containerID="c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.699882 5032 scope.go:117] "RemoveContainer" containerID="7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.718230 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.727602 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vv8gq"] Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.737181 5032 scope.go:117] "RemoveContainer" containerID="1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.799466 5032 scope.go:117] "RemoveContainer" containerID="c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d" Jan 22 17:19:37 crc kubenswrapper[5032]: E0122 17:19:37.800192 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d\": container with ID starting with c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d not found: ID does not exist" containerID="c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.800239 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d"} err="failed to get container status 
\"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d\": rpc error: code = NotFound desc = could not find container \"c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d\": container with ID starting with c6484c8b606d87bc48b2fca7557688cf71abd59eb8d6fcc4b65ec6e11c10a68d not found: ID does not exist" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.800267 5032 scope.go:117] "RemoveContainer" containerID="7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb" Jan 22 17:19:37 crc kubenswrapper[5032]: E0122 17:19:37.800552 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb\": container with ID starting with 7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb not found: ID does not exist" containerID="7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.800595 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb"} err="failed to get container status \"7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb\": rpc error: code = NotFound desc = could not find container \"7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb\": container with ID starting with 7c82d3477bde02fa512d7fa916914aa10f59c7308e7f09bc36182d331cac9bfb not found: ID does not exist" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.800625 5032 scope.go:117] "RemoveContainer" containerID="1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad" Jan 22 17:19:37 crc kubenswrapper[5032]: E0122 17:19:37.800827 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad\": container with ID starting with 1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad not found: ID does not exist" containerID="1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad" Jan 22 17:19:37 crc kubenswrapper[5032]: I0122 17:19:37.800851 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad"} err="failed to get container status \"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad\": rpc error: code = NotFound desc = could not find container \"1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad\": container with ID starting with 1c5544959ec8ff16ed5835bd11292bdf757196d7c27407362107a77ffdd301ad not found: ID does not exist" Jan 22 17:19:38 crc kubenswrapper[5032]: I0122 17:19:38.473940 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2797405e-81ab-4368-8298-6a232c327df2" path="/var/lib/kubelet/pods/2797405e-81ab-4368-8298-6a232c327df2/volumes" Jan 22 17:19:50 crc kubenswrapper[5032]: I0122 17:19:50.454430 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:19:50 crc kubenswrapper[5032]: E0122 17:19:50.455302 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:20:02 crc kubenswrapper[5032]: I0122 17:20:02.455616 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:20:02 crc kubenswrapper[5032]: E0122 17:20:02.456615 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:20:13 crc kubenswrapper[5032]: I0122 17:20:13.455055 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:20:13 crc kubenswrapper[5032]: E0122 17:20:13.456114 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:20:24 crc kubenswrapper[5032]: I0122 17:20:24.455345 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:20:24 crc kubenswrapper[5032]: E0122 17:20:24.456113 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:20:36 crc kubenswrapper[5032]: I0122 17:20:36.462444 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:20:36 crc kubenswrapper[5032]: E0122 17:20:36.472997 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:20:47 crc kubenswrapper[5032]: I0122 17:20:47.454667 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:20:47 crc kubenswrapper[5032]: E0122 17:20:47.455523 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:21:00 crc kubenswrapper[5032]: I0122 17:21:00.454464 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:21:00 crc kubenswrapper[5032]: E0122 17:21:00.455140 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:21:15 crc kubenswrapper[5032]: I0122 17:21:15.455026 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:21:15 crc kubenswrapper[5032]: E0122 17:21:15.455971 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:21:30 crc kubenswrapper[5032]: I0122 17:21:30.455638 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:21:30 crc kubenswrapper[5032]: E0122 17:21:30.456801 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:21:43 crc kubenswrapper[5032]: I0122 17:21:43.454679 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:21:43 crc kubenswrapper[5032]: E0122 17:21:43.455481 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:21:55 crc kubenswrapper[5032]: I0122 17:21:55.455282 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:21:55 crc kubenswrapper[5032]: E0122 17:21:55.456437 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:22:08 crc kubenswrapper[5032]: I0122 17:22:08.456158 5032 scope.go:117] "RemoveContainer" 
containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:22:08 crc kubenswrapper[5032]: E0122 17:22:08.457372 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.284288 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285461 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="extract-utilities" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285478 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="extract-utilities" Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285495 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="extract-content" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285503 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="extract-content" Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285520 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="extract-utilities" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285528 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="extract-utilities" Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285554 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="registry-server" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285561 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="registry-server" Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285574 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="registry-server" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285582 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="registry-server" Jan 22 17:22:17 crc kubenswrapper[5032]: E0122 17:22:17.285601 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="extract-content" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285608 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="extract-content" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285833 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ccb1f13-d4bf-4e43-8aa2-cdca1dc91d12" containerName="registry-server" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.285875 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="2797405e-81ab-4368-8298-6a232c327df2" containerName="registry-server" Jan 22 17:22:17 
crc kubenswrapper[5032]: I0122 17:22:17.287551 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.318257 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.401819 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmv2t\" (UniqueName: \"kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.401896 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.402611 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.503944 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.504305 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmv2t\" (UniqueName: \"kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.504336 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.504877 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.504925 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" 
Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.542026 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmv2t\" (UniqueName: \"kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t\") pod \"certified-operators-lpfnw\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:17 crc kubenswrapper[5032]: I0122 17:22:17.616803 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:18 crc kubenswrapper[5032]: I0122 17:22:18.112155 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:22:18 crc kubenswrapper[5032]: I0122 17:22:18.364643 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerStarted","Data":"8dd2ba8d3d4f668b46ae1a9581616003469d9337487547eb46c8c8e560893087"} Jan 22 17:22:19 crc kubenswrapper[5032]: I0122 17:22:19.373772 5032 generic.go:334] "Generic (PLEG): container finished" podID="63ac7df3-5723-464e-af5d-873703270818" containerID="7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420" exitCode=0 Jan 22 17:22:19 crc kubenswrapper[5032]: I0122 17:22:19.373994 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerDied","Data":"7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420"} Jan 22 17:22:19 crc kubenswrapper[5032]: I0122 17:22:19.376074 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:22:22 crc kubenswrapper[5032]: I0122 17:22:22.455296 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:22:22 crc kubenswrapper[5032]: E0122 17:22:22.456079 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:22:28 crc kubenswrapper[5032]: I0122 17:22:28.447736 5032 generic.go:334] "Generic (PLEG): container finished" podID="63ac7df3-5723-464e-af5d-873703270818" containerID="94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b" exitCode=0 Jan 22 17:22:28 crc kubenswrapper[5032]: I0122 17:22:28.447817 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerDied","Data":"94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b"} Jan 22 17:22:32 crc kubenswrapper[5032]: I0122 17:22:32.493978 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerStarted","Data":"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0"} Jan 22 17:22:32 crc kubenswrapper[5032]: I0122 17:22:32.524027 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-lpfnw" podStartSLOduration=2.749886522 podStartE2EDuration="15.524006533s" podCreationTimestamp="2026-01-22 17:22:17 +0000 UTC" firstStartedPulling="2026-01-22 17:22:19.375869128 +0000 UTC m=+4643.188038159" lastFinishedPulling="2026-01-22 17:22:32.149989099 +0000 UTC m=+4655.962158170" observedRunningTime="2026-01-22 17:22:32.51262082 +0000 UTC m=+4656.324789871" watchObservedRunningTime="2026-01-22 17:22:32.524006533 +0000 UTC m=+4656.336175574" Jan 22 17:22:33 crc kubenswrapper[5032]: I0122 17:22:33.455609 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:22:33 crc kubenswrapper[5032]: E0122 17:22:33.456107 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:22:37 crc kubenswrapper[5032]: I0122 17:22:37.617602 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:37 crc kubenswrapper[5032]: I0122 17:22:37.618291 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:37 crc kubenswrapper[5032]: I0122 17:22:37.682661 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:38 crc kubenswrapper[5032]: I0122 17:22:38.596102 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:22:38 crc kubenswrapper[5032]: I0122 17:22:38.667258 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:22:38 crc kubenswrapper[5032]: I0122 17:22:38.715192 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:22:38 crc kubenswrapper[5032]: I0122 17:22:38.715426 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dxxqb" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="registry-server" containerID="cri-o://5191032f20fc5e555cfc958f43b0d38396214a88bb7108868ed5e58ce9f0260e" gracePeriod=2 Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.560102 5032 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b438-91ae-489f-ba17-456445ad126a" containerID="5191032f20fc5e555cfc958f43b0d38396214a88bb7108868ed5e58ce9f0260e" exitCode=0 Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.560982 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerDied","Data":"5191032f20fc5e555cfc958f43b0d38396214a88bb7108868ed5e58ce9f0260e"} Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.779038 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.799095 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities\") pod \"c9e2b438-91ae-489f-ba17-456445ad126a\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.799318 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmlr6\" (UniqueName: \"kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6\") pod \"c9e2b438-91ae-489f-ba17-456445ad126a\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.799486 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content\") pod \"c9e2b438-91ae-489f-ba17-456445ad126a\" (UID: \"c9e2b438-91ae-489f-ba17-456445ad126a\") " Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.800579 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities" (OuterVolumeSpecName: "utilities") pod "c9e2b438-91ae-489f-ba17-456445ad126a" (UID: "c9e2b438-91ae-489f-ba17-456445ad126a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.833806 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6" (OuterVolumeSpecName: "kube-api-access-kmlr6") pod "c9e2b438-91ae-489f-ba17-456445ad126a" (UID: "c9e2b438-91ae-489f-ba17-456445ad126a"). InnerVolumeSpecName "kube-api-access-kmlr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.865652 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9e2b438-91ae-489f-ba17-456445ad126a" (UID: "c9e2b438-91ae-489f-ba17-456445ad126a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.901533 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.901843 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b438-91ae-489f-ba17-456445ad126a-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:22:39 crc kubenswrapper[5032]: I0122 17:22:39.901872 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmlr6\" (UniqueName: \"kubernetes.io/projected/c9e2b438-91ae-489f-ba17-456445ad126a-kube-api-access-kmlr6\") on node \"crc\" DevicePath \"\"" Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.569291 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dxxqb" Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.569336 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dxxqb" event={"ID":"c9e2b438-91ae-489f-ba17-456445ad126a","Type":"ContainerDied","Data":"ce7c6a629e2e04cad38cdaaeafef5ef7031e05020e3d440d5d4f38396087366e"} Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.569370 5032 scope.go:117] "RemoveContainer" containerID="5191032f20fc5e555cfc958f43b0d38396214a88bb7108868ed5e58ce9f0260e" Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.593134 5032 scope.go:117] "RemoveContainer" containerID="092afb9cb50ca41bfad40468f0e3e4e729d35c9e47686fdc98a454603baf66da" Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.594103 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.601785 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dxxqb"] Jan 22 17:22:40 crc kubenswrapper[5032]: I0122 17:22:40.613529 5032 scope.go:117] "RemoveContainer" containerID="ef7215a727fbcc0c3443d8d6eef4e3178abe10f233d1a847a4b1bea1dcc9bcc9" Jan 22 17:22:42 crc kubenswrapper[5032]: I0122 17:22:42.469325 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" path="/var/lib/kubelet/pods/c9e2b438-91ae-489f-ba17-456445ad126a/volumes" Jan 22 17:22:48 crc kubenswrapper[5032]: I0122 17:22:48.454600 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:22:48 crc kubenswrapper[5032]: E0122 17:22:48.455432 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:23:01 crc kubenswrapper[5032]: I0122 17:23:01.455022 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:23:01 crc kubenswrapper[5032]: E0122 17:23:01.455940 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:23:13 crc kubenswrapper[5032]: I0122 17:23:13.455986 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:23:13 crc kubenswrapper[5032]: I0122 17:23:13.918014 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47"} Jan 22 17:25:33 crc kubenswrapper[5032]: I0122 17:25:33.016958 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:25:33 crc kubenswrapper[5032]: I0122 17:25:33.017513 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:26:03 crc kubenswrapper[5032]: I0122 17:26:03.016444 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:26:03 crc kubenswrapper[5032]: I0122 17:26:03.017065 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.017697 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.018543 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.018641 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.020176 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.020288 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47" gracePeriod=600 Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.935483 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47" exitCode=0 Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.936158 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47"} Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.936185 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5"} Jan 22 17:26:33 crc kubenswrapper[5032]: I0122 17:26:33.936201 5032 scope.go:117] "RemoveContainer" containerID="af9c2144c42cc984beb4a6ddee30a4cf0fab30f66d2eb8adf916d31d31ef07b9" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.293832 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:27:55 crc kubenswrapper[5032]: E0122 17:27:55.294946 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="extract-content" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.294973 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="extract-content" Jan 22 17:27:55 crc kubenswrapper[5032]: E0122 17:27:55.295018 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="registry-server" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.295031 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="registry-server" Jan 22 17:27:55 crc kubenswrapper[5032]: E0122 17:27:55.295059 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="extract-utilities" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.295071 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="extract-utilities" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.295388 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9e2b438-91ae-489f-ba17-456445ad126a" containerName="registry-server" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.297561 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.310194 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.388403 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.388483 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.388538 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2t9x\" (UniqueName: \"kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.490888 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.491194 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.491429 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2t9x\" (UniqueName: \"kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.492693 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.493366 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.509945 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-n2t9x\" (UniqueName: \"kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x\") pod \"redhat-operators-thdtf\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:55 crc kubenswrapper[5032]: I0122 17:27:55.635910 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:27:56 crc kubenswrapper[5032]: I0122 17:27:56.125940 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:27:56 crc kubenswrapper[5032]: I0122 17:27:56.705244 5032 generic.go:334] "Generic (PLEG): container finished" podID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerID="dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb" exitCode=0 Jan 22 17:27:56 crc kubenswrapper[5032]: I0122 17:27:56.705338 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerDied","Data":"dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb"} Jan 22 17:27:56 crc kubenswrapper[5032]: I0122 17:27:56.705547 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerStarted","Data":"5a867591774183b6bd730ef5121cc23fe5ef47965537824f57990570f4a91db5"} Jan 22 17:27:56 crc kubenswrapper[5032]: I0122 17:27:56.706922 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:27:57 crc kubenswrapper[5032]: I0122 17:27:57.713090 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerStarted","Data":"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f"} Jan 22 17:28:04 crc kubenswrapper[5032]: I0122 17:28:04.803922 5032 generic.go:334] "Generic (PLEG): container finished" podID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerID="093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f" exitCode=0 Jan 22 17:28:04 crc kubenswrapper[5032]: I0122 17:28:04.803997 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerDied","Data":"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f"} Jan 22 17:28:05 crc kubenswrapper[5032]: I0122 17:28:05.817441 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerStarted","Data":"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0"} Jan 22 17:28:05 crc kubenswrapper[5032]: I0122 17:28:05.839129 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-thdtf" podStartSLOduration=2.352299132 podStartE2EDuration="10.839111283s" podCreationTimestamp="2026-01-22 17:27:55 +0000 UTC" firstStartedPulling="2026-01-22 17:27:56.70672077 +0000 UTC m=+4980.518889801" lastFinishedPulling="2026-01-22 17:28:05.193532891 +0000 UTC m=+4989.005701952" observedRunningTime="2026-01-22 17:28:05.834709586 +0000 UTC m=+4989.646878617" watchObservedRunningTime="2026-01-22 17:28:05.839111283 +0000 UTC m=+4989.651280314" Jan 22 17:28:15 crc 
kubenswrapper[5032]: I0122 17:28:15.637127 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:15 crc kubenswrapper[5032]: I0122 17:28:15.637792 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:15 crc kubenswrapper[5032]: I0122 17:28:15.730140 5032 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-znmml" podUID="f431be2d-ceb6-4b82-a770-6827a13d1bb1" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 22 17:28:16 crc kubenswrapper[5032]: I0122 17:28:16.499128 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:16 crc kubenswrapper[5032]: I0122 17:28:16.563160 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:16 crc kubenswrapper[5032]: I0122 17:28:16.752526 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:28:17 crc kubenswrapper[5032]: I0122 17:28:17.922404 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-thdtf" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="registry-server" containerID="cri-o://4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0" gracePeriod=2 Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.901057 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.956441 5032 generic.go:334] "Generic (PLEG): container finished" podID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerID="4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0" exitCode=0 Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.956504 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerDied","Data":"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0"} Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.956532 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-thdtf" event={"ID":"6137401a-a790-4bab-b5a8-07bab59bdc5e","Type":"ContainerDied","Data":"5a867591774183b6bd730ef5121cc23fe5ef47965537824f57990570f4a91db5"} Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.956550 5032 scope.go:117] "RemoveContainer" containerID="4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0" Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.956762 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-thdtf" Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.967178 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2t9x\" (UniqueName: \"kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x\") pod \"6137401a-a790-4bab-b5a8-07bab59bdc5e\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.967282 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities\") pod \"6137401a-a790-4bab-b5a8-07bab59bdc5e\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.967353 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content\") pod \"6137401a-a790-4bab-b5a8-07bab59bdc5e\" (UID: \"6137401a-a790-4bab-b5a8-07bab59bdc5e\") " Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.969130 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities" (OuterVolumeSpecName: "utilities") pod "6137401a-a790-4bab-b5a8-07bab59bdc5e" (UID: "6137401a-a790-4bab-b5a8-07bab59bdc5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.984403 5032 scope.go:117] "RemoveContainer" containerID="093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f" Jan 22 17:28:18 crc kubenswrapper[5032]: I0122 17:28:18.992487 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x" (OuterVolumeSpecName: "kube-api-access-n2t9x") pod "6137401a-a790-4bab-b5a8-07bab59bdc5e" (UID: "6137401a-a790-4bab-b5a8-07bab59bdc5e"). InnerVolumeSpecName "kube-api-access-n2t9x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.045344 5032 scope.go:117] "RemoveContainer" containerID="dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.069604 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2t9x\" (UniqueName: \"kubernetes.io/projected/6137401a-a790-4bab-b5a8-07bab59bdc5e-kube-api-access-n2t9x\") on node \"crc\" DevicePath \"\"" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.069656 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.086943 5032 scope.go:117] "RemoveContainer" containerID="4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0" Jan 22 17:28:19 crc kubenswrapper[5032]: E0122 17:28:19.088079 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0\": container with ID starting with 4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0 not found: ID does not exist" containerID="4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.088135 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0"} err="failed to get container status \"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0\": rpc error: code = NotFound desc = could not find container \"4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0\": container with ID starting with 4bca1ef80b246db4a77fc5d98135e6849ff7cb4ec2f50152f3c32ec2e08635f0 not found: ID does not exist" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.088168 5032 scope.go:117] "RemoveContainer" containerID="093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.088385 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6137401a-a790-4bab-b5a8-07bab59bdc5e" (UID: "6137401a-a790-4bab-b5a8-07bab59bdc5e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:28:19 crc kubenswrapper[5032]: E0122 17:28:19.088607 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f\": container with ID starting with 093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f not found: ID does not exist" containerID="093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.088638 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f"} err="failed to get container status \"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f\": rpc error: code = NotFound desc = could not find container \"093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f\": container with ID starting with 093052a96d84b2cae42811fb9432b81d84c6d23133d7f4d28d7ded997ec6419f not found: ID does not exist" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.088660 5032 scope.go:117] "RemoveContainer" containerID="dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb" Jan 22 17:28:19 crc kubenswrapper[5032]: E0122 17:28:19.089020 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb\": container with ID starting with dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb not found: ID does not exist" containerID="dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.089039 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb"} err="failed to get container status \"dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb\": rpc error: code = NotFound desc = could not find container \"dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb\": container with ID starting with dcfde7bc1653655798400872eb38ab99fa56a818b043c83a4d5ec27c58630fcb not found: ID does not exist" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.171623 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6137401a-a790-4bab-b5a8-07bab59bdc5e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.310421 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:28:19 crc kubenswrapper[5032]: I0122 17:28:19.323623 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-thdtf"] Jan 22 17:28:20 crc kubenswrapper[5032]: I0122 17:28:20.466903 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" path="/var/lib/kubelet/pods/6137401a-a790-4bab-b5a8-07bab59bdc5e/volumes" Jan 22 17:28:33 crc kubenswrapper[5032]: I0122 17:28:33.016635 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 22 17:28:33 crc kubenswrapper[5032]: I0122 17:28:33.017092 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:29:03 crc kubenswrapper[5032]: I0122 17:29:03.016922 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:29:03 crc kubenswrapper[5032]: I0122 17:29:03.017601 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.640762 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:14 crc kubenswrapper[5032]: E0122 17:29:14.641902 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="extract-content" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.641918 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="extract-content" Jan 22 17:29:14 crc kubenswrapper[5032]: E0122 17:29:14.641951 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="extract-utilities" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.641960 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="extract-utilities" Jan 22 17:29:14 crc kubenswrapper[5032]: E0122 17:29:14.641972 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="registry-server" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.641983 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="registry-server" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.642221 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6137401a-a790-4bab-b5a8-07bab59bdc5e" containerName="registry-server" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.644054 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.656584 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.750731 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.751000 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.751076 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcz5x\" (UniqueName: \"kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.853705 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.853763 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcz5x\" (UniqueName: \"kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.853933 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.854329 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.854414 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.905789 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zcz5x\" (UniqueName: \"kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x\") pod \"redhat-marketplace-8kpkb\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:14 crc kubenswrapper[5032]: I0122 17:29:14.963590 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:15 crc kubenswrapper[5032]: I0122 17:29:15.474369 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:15 crc kubenswrapper[5032]: I0122 17:29:15.491790 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerStarted","Data":"2d8253ebb8d9606983ca0a0870f5f905134103e209b69b1efaa40437b7b61e3d"} Jan 22 17:29:16 crc kubenswrapper[5032]: I0122 17:29:16.510043 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerID="f28463ee3fd5a339cbd4a9e9ba7aa928fc563f59bc85747562ab4eccdd6de0fd" exitCode=0 Jan 22 17:29:16 crc kubenswrapper[5032]: I0122 17:29:16.510110 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerDied","Data":"f28463ee3fd5a339cbd4a9e9ba7aa928fc563f59bc85747562ab4eccdd6de0fd"} Jan 22 17:29:19 crc kubenswrapper[5032]: I0122 17:29:19.536719 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerID="f006dbe43dd89c8eb3f4fa0f19651fb4bbd7ad77f0d03b7f488baa90241aaae5" exitCode=0 Jan 22 17:29:19 crc kubenswrapper[5032]: I0122 17:29:19.536760 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerDied","Data":"f006dbe43dd89c8eb3f4fa0f19651fb4bbd7ad77f0d03b7f488baa90241aaae5"} Jan 22 17:29:20 crc kubenswrapper[5032]: I0122 17:29:20.549427 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerStarted","Data":"d2368b8875f8ffd35742b79f9edb57d17bb0e207a2a8922cea612bce4652b96c"} Jan 22 17:29:20 crc kubenswrapper[5032]: I0122 17:29:20.575939 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8kpkb" podStartSLOduration=3.14288876 podStartE2EDuration="6.575911252s" podCreationTimestamp="2026-01-22 17:29:14 +0000 UTC" firstStartedPulling="2026-01-22 17:29:16.512568023 +0000 UTC m=+5060.324737054" lastFinishedPulling="2026-01-22 17:29:19.945590515 +0000 UTC m=+5063.757759546" observedRunningTime="2026-01-22 17:29:20.573747634 +0000 UTC m=+5064.385916705" watchObservedRunningTime="2026-01-22 17:29:20.575911252 +0000 UTC m=+5064.388080303" Jan 22 17:29:24 crc kubenswrapper[5032]: I0122 17:29:24.964541 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:24 crc kubenswrapper[5032]: I0122 17:29:24.965191 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:25 crc kubenswrapper[5032]: I0122 17:29:25.025274 5032 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:25 crc kubenswrapper[5032]: I0122 17:29:25.636744 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:25 crc kubenswrapper[5032]: I0122 17:29:25.690491 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:27 crc kubenswrapper[5032]: I0122 17:29:27.613948 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8kpkb" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="registry-server" containerID="cri-o://d2368b8875f8ffd35742b79f9edb57d17bb0e207a2a8922cea612bce4652b96c" gracePeriod=2 Jan 22 17:29:29 crc kubenswrapper[5032]: I0122 17:29:29.636564 5032 generic.go:334] "Generic (PLEG): container finished" podID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerID="d2368b8875f8ffd35742b79f9edb57d17bb0e207a2a8922cea612bce4652b96c" exitCode=0 Jan 22 17:29:29 crc kubenswrapper[5032]: I0122 17:29:29.636645 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerDied","Data":"d2368b8875f8ffd35742b79f9edb57d17bb0e207a2a8922cea612bce4652b96c"} Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.320742 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.353194 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content\") pod \"8ff24201-6be5-4fbf-baa0-672ffb6759be\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.353236 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcz5x\" (UniqueName: \"kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x\") pod \"8ff24201-6be5-4fbf-baa0-672ffb6759be\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.353259 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities\") pod \"8ff24201-6be5-4fbf-baa0-672ffb6759be\" (UID: \"8ff24201-6be5-4fbf-baa0-672ffb6759be\") " Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.354477 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities" (OuterVolumeSpecName: "utilities") pod "8ff24201-6be5-4fbf-baa0-672ffb6759be" (UID: "8ff24201-6be5-4fbf-baa0-672ffb6759be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.364117 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x" (OuterVolumeSpecName: "kube-api-access-zcz5x") pod "8ff24201-6be5-4fbf-baa0-672ffb6759be" (UID: "8ff24201-6be5-4fbf-baa0-672ffb6759be"). InnerVolumeSpecName "kube-api-access-zcz5x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.379428 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ff24201-6be5-4fbf-baa0-672ffb6759be" (UID: "8ff24201-6be5-4fbf-baa0-672ffb6759be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.455649 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.455909 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcz5x\" (UniqueName: \"kubernetes.io/projected/8ff24201-6be5-4fbf-baa0-672ffb6759be-kube-api-access-zcz5x\") on node \"crc\" DevicePath \"\"" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.456033 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ff24201-6be5-4fbf-baa0-672ffb6759be-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.649762 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8kpkb" event={"ID":"8ff24201-6be5-4fbf-baa0-672ffb6759be","Type":"ContainerDied","Data":"2d8253ebb8d9606983ca0a0870f5f905134103e209b69b1efaa40437b7b61e3d"} Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.649822 5032 scope.go:117] "RemoveContainer" containerID="d2368b8875f8ffd35742b79f9edb57d17bb0e207a2a8922cea612bce4652b96c" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.649928 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8kpkb" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.678756 5032 scope.go:117] "RemoveContainer" containerID="f006dbe43dd89c8eb3f4fa0f19651fb4bbd7ad77f0d03b7f488baa90241aaae5" Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.712920 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.735207 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8kpkb"] Jan 22 17:29:30 crc kubenswrapper[5032]: I0122 17:29:30.755059 5032 scope.go:117] "RemoveContainer" containerID="f28463ee3fd5a339cbd4a9e9ba7aa928fc563f59bc85747562ab4eccdd6de0fd" Jan 22 17:29:32 crc kubenswrapper[5032]: I0122 17:29:32.473301 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" path="/var/lib/kubelet/pods/8ff24201-6be5-4fbf-baa0-672ffb6759be/volumes" Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.017119 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.017443 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.017506 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.018548 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.018649 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" gracePeriod=600 Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.683255 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" exitCode=0 Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.683328 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5"} Jan 22 17:29:33 crc kubenswrapper[5032]: I0122 17:29:33.683403 5032 scope.go:117] "RemoveContainer" 
containerID="47826c9907e42fffe43858bf07c077a1f8d85a713f730bd3570ca8f4a3390e47" Jan 22 17:29:33 crc kubenswrapper[5032]: E0122 17:29:33.849435 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:29:34 crc kubenswrapper[5032]: I0122 17:29:34.694588 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:29:34 crc kubenswrapper[5032]: E0122 17:29:34.695189 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:29:45 crc kubenswrapper[5032]: I0122 17:29:45.457695 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:29:45 crc kubenswrapper[5032]: E0122 17:29:45.458464 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:29:57 crc kubenswrapper[5032]: I0122 17:29:57.454370 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:29:57 crc kubenswrapper[5032]: E0122 17:29:57.455007 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.147909 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f"] Jan 22 17:30:00 crc kubenswrapper[5032]: E0122 17:30:00.148711 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="extract-content" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.148727 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="extract-content" Jan 22 17:30:00 crc kubenswrapper[5032]: E0122 17:30:00.148746 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="registry-server" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.148754 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" 
containerName="registry-server" Jan 22 17:30:00 crc kubenswrapper[5032]: E0122 17:30:00.148769 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="extract-utilities" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.148779 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="extract-utilities" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.149103 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ff24201-6be5-4fbf-baa0-672ffb6759be" containerName="registry-server" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.149891 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.151924 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.152000 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.157476 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f"] Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.272306 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.272529 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.272631 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvv5d\" (UniqueName: \"kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.374449 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.374536 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.374572 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvv5d\" (UniqueName: \"kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.375405 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.380266 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.392550 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvv5d\" (UniqueName: \"kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d\") pod \"collect-profiles-29485050-j6v5f\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.475163 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:00 crc kubenswrapper[5032]: W0122 17:30:00.928428 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd0dc812_d638_48d0_96f3_ecc27b10dcac.slice/crio-9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795 WatchSource:0}: Error finding container 9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795: Status 404 returned error can't find the container with id 9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795 Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.928517 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f"] Jan 22 17:30:00 crc kubenswrapper[5032]: I0122 17:30:00.968478 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" event={"ID":"cd0dc812-d638-48d0-96f3-ecc27b10dcac","Type":"ContainerStarted","Data":"9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795"} Jan 22 17:30:01 crc kubenswrapper[5032]: I0122 17:30:01.985846 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" event={"ID":"cd0dc812-d638-48d0-96f3-ecc27b10dcac","Type":"ContainerStarted","Data":"b975138e99b58f16f3694a59b01d4672f6665db5986e9711de548eb8ba8c9a7e"} Jan 22 17:30:02 crc kubenswrapper[5032]: I0122 17:30:02.998778 5032 generic.go:334] "Generic (PLEG): container finished" podID="cd0dc812-d638-48d0-96f3-ecc27b10dcac" containerID="b975138e99b58f16f3694a59b01d4672f6665db5986e9711de548eb8ba8c9a7e" exitCode=0 Jan 22 17:30:02 crc kubenswrapper[5032]: I0122 17:30:02.998972 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" event={"ID":"cd0dc812-d638-48d0-96f3-ecc27b10dcac","Type":"ContainerDied","Data":"b975138e99b58f16f3694a59b01d4672f6665db5986e9711de548eb8ba8c9a7e"} Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.420642 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.528524 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume\") pod \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.528623 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume\") pod \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.528700 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvv5d\" (UniqueName: \"kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d\") pod \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\" (UID: \"cd0dc812-d638-48d0-96f3-ecc27b10dcac\") " Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.529507 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume" (OuterVolumeSpecName: "config-volume") pod "cd0dc812-d638-48d0-96f3-ecc27b10dcac" (UID: "cd0dc812-d638-48d0-96f3-ecc27b10dcac"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.535350 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d" (OuterVolumeSpecName: "kube-api-access-cvv5d") pod "cd0dc812-d638-48d0-96f3-ecc27b10dcac" (UID: "cd0dc812-d638-48d0-96f3-ecc27b10dcac"). InnerVolumeSpecName "kube-api-access-cvv5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.536189 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cd0dc812-d638-48d0-96f3-ecc27b10dcac" (UID: "cd0dc812-d638-48d0-96f3-ecc27b10dcac"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.631372 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvv5d\" (UniqueName: \"kubernetes.io/projected/cd0dc812-d638-48d0-96f3-ecc27b10dcac-kube-api-access-cvv5d\") on node \"crc\" DevicePath \"\"" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.631411 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd0dc812-d638-48d0-96f3-ecc27b10dcac-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:30:03 crc kubenswrapper[5032]: I0122 17:30:03.631420 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd0dc812-d638-48d0-96f3-ecc27b10dcac-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:30:04 crc kubenswrapper[5032]: I0122 17:30:04.008692 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" event={"ID":"cd0dc812-d638-48d0-96f3-ecc27b10dcac","Type":"ContainerDied","Data":"9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795"} Jan 22 17:30:04 crc kubenswrapper[5032]: I0122 17:30:04.009040 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c639f05f0dd8c7ed90d880c8aa8bbb7e3f3c4dd3d472f78a4ae66d75f90a795" Jan 22 17:30:04 crc kubenswrapper[5032]: I0122 17:30:04.008733 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f" Jan 22 17:30:04 crc kubenswrapper[5032]: I0122 17:30:04.503436 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km"] Jan 22 17:30:04 crc kubenswrapper[5032]: I0122 17:30:04.511830 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485005-rl2km"] Jan 22 17:30:06 crc kubenswrapper[5032]: I0122 17:30:06.464545 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47319913-434f-4ed0-8c34-9ce2385770b6" path="/var/lib/kubelet/pods/47319913-434f-4ed0-8c34-9ce2385770b6/volumes" Jan 22 17:30:12 crc kubenswrapper[5032]: I0122 17:30:12.454636 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:30:12 crc kubenswrapper[5032]: E0122 17:30:12.455471 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:30:21 crc kubenswrapper[5032]: I0122 17:30:21.150979 5032 scope.go:117] "RemoveContainer" containerID="73b09e38a774f1e7384569368f65c504d0e2d248978bdeb20a753708f6b26c74" Jan 22 17:30:23 crc kubenswrapper[5032]: I0122 17:30:23.454666 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:30:23 crc kubenswrapper[5032]: E0122 17:30:23.455183 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:30:34 crc kubenswrapper[5032]: I0122 17:30:34.454590 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:30:34 crc kubenswrapper[5032]: E0122 17:30:34.455480 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:30:47 crc kubenswrapper[5032]: I0122 17:30:47.455533 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:30:47 crc kubenswrapper[5032]: E0122 17:30:47.457606 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:30:59 crc kubenswrapper[5032]: I0122 17:30:59.454893 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:30:59 crc kubenswrapper[5032]: E0122 17:30:59.455513 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:31:12 crc kubenswrapper[5032]: I0122 17:31:12.455297 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:31:12 crc kubenswrapper[5032]: E0122 17:31:12.456310 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:31:25 crc kubenswrapper[5032]: I0122 17:31:25.454972 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:31:25 crc kubenswrapper[5032]: E0122 17:31:25.456194 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:31:40 crc kubenswrapper[5032]: I0122 17:31:40.455939 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:31:40 crc kubenswrapper[5032]: E0122 17:31:40.456962 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:31:53 crc kubenswrapper[5032]: I0122 17:31:53.456474 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:31:53 crc kubenswrapper[5032]: E0122 17:31:53.457659 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:32:06 crc kubenswrapper[5032]: I0122 17:32:06.465678 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:32:06 crc kubenswrapper[5032]: E0122 17:32:06.466932 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:32:20 crc kubenswrapper[5032]: I0122 17:32:20.454951 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:32:20 crc kubenswrapper[5032]: E0122 17:32:20.455681 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:32:33 crc kubenswrapper[5032]: I0122 17:32:33.454465 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:32:33 crc kubenswrapper[5032]: E0122 17:32:33.455206 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:32:36 crc kubenswrapper[5032]: I0122 17:32:36.449163 5032 
prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="e831a62a-4003-41ec-b3ac-ba0d29a91d95" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.168:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 17:32:46 crc kubenswrapper[5032]: I0122 17:32:46.461607 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:32:46 crc kubenswrapper[5032]: E0122 17:32:46.462452 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:32:55 crc kubenswrapper[5032]: I0122 17:32:55.733731 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 22 17:32:55 crc kubenswrapper[5032]: I0122 17:32:55.736626 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="89f96057-2096-47e7-88c3-bdeab76e4fe6" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 22 17:33:00 crc kubenswrapper[5032]: I0122 17:33:00.455145 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:33:00 crc kubenswrapper[5032]: E0122 17:33:00.455776 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:33:14 crc kubenswrapper[5032]: I0122 17:33:14.454724 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:33:14 crc kubenswrapper[5032]: E0122 17:33:14.455716 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:33:25 crc kubenswrapper[5032]: I0122 17:33:25.455235 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:33:25 crc kubenswrapper[5032]: E0122 17:33:25.456079 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:33:38 crc kubenswrapper[5032]: I0122 17:33:38.455056 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:33:38 crc kubenswrapper[5032]: E0122 17:33:38.455927 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:33:52 crc kubenswrapper[5032]: I0122 17:33:52.454391 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:33:52 crc kubenswrapper[5032]: E0122 17:33:52.456209 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:34:04 crc kubenswrapper[5032]: I0122 17:34:04.455549 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:34:04 crc kubenswrapper[5032]: E0122 17:34:04.456527 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:34:16 crc kubenswrapper[5032]: I0122 17:34:16.460755 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:34:16 crc kubenswrapper[5032]: E0122 17:34:16.461698 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.531892 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4l4v2"] Jan 22 17:34:19 crc kubenswrapper[5032]: E0122 17:34:19.532850 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd0dc812-d638-48d0-96f3-ecc27b10dcac" containerName="collect-profiles" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.532884 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd0dc812-d638-48d0-96f3-ecc27b10dcac" containerName="collect-profiles" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.533130 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd0dc812-d638-48d0-96f3-ecc27b10dcac" containerName="collect-profiles" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 
17:34:19.534837 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.542043 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4l4v2"] Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.609176 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-utilities\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.609310 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-catalog-content\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.609341 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxcwc\" (UniqueName: \"kubernetes.io/projected/75419f93-5572-44b2-a1b3-ddc47833b19d-kube-api-access-dxcwc\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.710767 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-utilities\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.710928 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-catalog-content\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.710960 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxcwc\" (UniqueName: \"kubernetes.io/projected/75419f93-5572-44b2-a1b3-ddc47833b19d-kube-api-access-dxcwc\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.711435 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-catalog-content\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.711432 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75419f93-5572-44b2-a1b3-ddc47833b19d-utilities\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:19 crc 
kubenswrapper[5032]: I0122 17:34:19.721997 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.723827 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.738632 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.813254 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2tsd\" (UniqueName: \"kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.813345 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.813546 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.915545 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.915733 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2tsd\" (UniqueName: \"kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.915800 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.916042 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:19 crc kubenswrapper[5032]: I0122 17:34:19.916243 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:20 crc kubenswrapper[5032]: I0122 17:34:20.229219 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxcwc\" (UniqueName: \"kubernetes.io/projected/75419f93-5572-44b2-a1b3-ddc47833b19d-kube-api-access-dxcwc\") pod \"community-operators-4l4v2\" (UID: \"75419f93-5572-44b2-a1b3-ddc47833b19d\") " pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:20 crc kubenswrapper[5032]: I0122 17:34:20.243675 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2tsd\" (UniqueName: \"kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd\") pod \"certified-operators-s9dn4\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:20 crc kubenswrapper[5032]: I0122 17:34:20.341417 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:20 crc kubenswrapper[5032]: I0122 17:34:20.453075 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:20 crc kubenswrapper[5032]: I0122 17:34:20.875716 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:20 crc kubenswrapper[5032]: W0122 17:34:20.879664 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod246271df_1659_4521_8f1f_71d7709c0ae9.slice/crio-06928a4219fac5ed3020149d236ed4643aac3238702f530d3ead8b42e144e017 WatchSource:0}: Error finding container 06928a4219fac5ed3020149d236ed4643aac3238702f530d3ead8b42e144e017: Status 404 returned error can't find the container with id 06928a4219fac5ed3020149d236ed4643aac3238702f530d3ead8b42e144e017 Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.010936 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4l4v2"] Jan 22 17:34:21 crc kubenswrapper[5032]: W0122 17:34:21.018119 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75419f93_5572_44b2_a1b3_ddc47833b19d.slice/crio-47f4840d07b5243e3fc4bd9cc13db82e7ddc6010fe4433f3d9ed42ccf8b21b7e WatchSource:0}: Error finding container 47f4840d07b5243e3fc4bd9cc13db82e7ddc6010fe4433f3d9ed42ccf8b21b7e: Status 404 returned error can't find the container with id 47f4840d07b5243e3fc4bd9cc13db82e7ddc6010fe4433f3d9ed42ccf8b21b7e Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.425755 5032 generic.go:334] "Generic (PLEG): container finished" podID="75419f93-5572-44b2-a1b3-ddc47833b19d" containerID="70bdbec13155ae5f58a3d1f97ab6a881a6f43b8d48670ddc0e80f8f8ea7a4362" exitCode=0 Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.425855 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l4v2" event={"ID":"75419f93-5572-44b2-a1b3-ddc47833b19d","Type":"ContainerDied","Data":"70bdbec13155ae5f58a3d1f97ab6a881a6f43b8d48670ddc0e80f8f8ea7a4362"} Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.426124 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-4l4v2" event={"ID":"75419f93-5572-44b2-a1b3-ddc47833b19d","Type":"ContainerStarted","Data":"47f4840d07b5243e3fc4bd9cc13db82e7ddc6010fe4433f3d9ed42ccf8b21b7e"} Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.428213 5032 generic.go:334] "Generic (PLEG): container finished" podID="246271df-1659-4521-8f1f-71d7709c0ae9" containerID="d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26" exitCode=0 Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.428259 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerDied","Data":"d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26"} Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.428294 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerStarted","Data":"06928a4219fac5ed3020149d236ed4643aac3238702f530d3ead8b42e144e017"} Jan 22 17:34:21 crc kubenswrapper[5032]: I0122 17:34:21.428905 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:34:22 crc kubenswrapper[5032]: I0122 17:34:22.441085 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerStarted","Data":"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8"} Jan 22 17:34:23 crc kubenswrapper[5032]: I0122 17:34:23.453844 5032 generic.go:334] "Generic (PLEG): container finished" podID="246271df-1659-4521-8f1f-71d7709c0ae9" containerID="8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8" exitCode=0 Jan 22 17:34:23 crc kubenswrapper[5032]: I0122 17:34:23.454157 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerDied","Data":"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8"} Jan 22 17:34:24 crc kubenswrapper[5032]: I0122 17:34:24.469627 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerStarted","Data":"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb"} Jan 22 17:34:24 crc kubenswrapper[5032]: I0122 17:34:24.488087 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s9dn4" podStartSLOduration=3.038980197 podStartE2EDuration="5.488064761s" podCreationTimestamp="2026-01-22 17:34:19 +0000 UTC" firstStartedPulling="2026-01-22 17:34:21.429479215 +0000 UTC m=+5365.241648246" lastFinishedPulling="2026-01-22 17:34:23.878563779 +0000 UTC m=+5367.690732810" observedRunningTime="2026-01-22 17:34:24.485189135 +0000 UTC m=+5368.297358176" watchObservedRunningTime="2026-01-22 17:34:24.488064761 +0000 UTC m=+5368.300233792" Jan 22 17:34:28 crc kubenswrapper[5032]: I0122 17:34:28.454802 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:34:28 crc kubenswrapper[5032]: E0122 17:34:28.455955 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:34:29 crc kubenswrapper[5032]: I0122 17:34:29.526906 5032 generic.go:334] "Generic (PLEG): container finished" podID="75419f93-5572-44b2-a1b3-ddc47833b19d" containerID="5af06c638603b73d19c05023a2d5484828f7895f153f36ccc0a5094b2f324f78" exitCode=0 Jan 22 17:34:29 crc kubenswrapper[5032]: I0122 17:34:29.526960 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l4v2" event={"ID":"75419f93-5572-44b2-a1b3-ddc47833b19d","Type":"ContainerDied","Data":"5af06c638603b73d19c05023a2d5484828f7895f153f36ccc0a5094b2f324f78"} Jan 22 17:34:30 crc kubenswrapper[5032]: I0122 17:34:30.341636 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:30 crc kubenswrapper[5032]: I0122 17:34:30.341977 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:30 crc kubenswrapper[5032]: I0122 17:34:30.401086 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:30 crc kubenswrapper[5032]: I0122 17:34:30.539495 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l4v2" event={"ID":"75419f93-5572-44b2-a1b3-ddc47833b19d","Type":"ContainerStarted","Data":"3d13826f827d89acf4173d5aedc06ffd92792deff5f7b2d6e71916f5bac5da72"} Jan 22 17:34:31 crc kubenswrapper[5032]: I0122 17:34:31.282982 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:31 crc kubenswrapper[5032]: I0122 17:34:31.348850 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:31 crc kubenswrapper[5032]: I0122 17:34:31.567698 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4l4v2" podStartSLOduration=3.694014921 podStartE2EDuration="12.567667307s" podCreationTimestamp="2026-01-22 17:34:19 +0000 UTC" firstStartedPulling="2026-01-22 17:34:21.428595231 +0000 UTC m=+5365.240764262" lastFinishedPulling="2026-01-22 17:34:30.302247627 +0000 UTC m=+5374.114416648" observedRunningTime="2026-01-22 17:34:31.561360559 +0000 UTC m=+5375.373529610" watchObservedRunningTime="2026-01-22 17:34:31.567667307 +0000 UTC m=+5375.379836348" Jan 22 17:34:32 crc kubenswrapper[5032]: I0122 17:34:32.558440 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s9dn4" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="registry-server" containerID="cri-o://4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb" gracePeriod=2 Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.400416 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.497918 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2tsd\" (UniqueName: \"kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd\") pod \"246271df-1659-4521-8f1f-71d7709c0ae9\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.498148 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities\") pod \"246271df-1659-4521-8f1f-71d7709c0ae9\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.498222 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content\") pod \"246271df-1659-4521-8f1f-71d7709c0ae9\" (UID: \"246271df-1659-4521-8f1f-71d7709c0ae9\") " Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.499232 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities" (OuterVolumeSpecName: "utilities") pod "246271df-1659-4521-8f1f-71d7709c0ae9" (UID: "246271df-1659-4521-8f1f-71d7709c0ae9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.505256 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd" (OuterVolumeSpecName: "kube-api-access-m2tsd") pod "246271df-1659-4521-8f1f-71d7709c0ae9" (UID: "246271df-1659-4521-8f1f-71d7709c0ae9"). InnerVolumeSpecName "kube-api-access-m2tsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.557275 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "246271df-1659-4521-8f1f-71d7709c0ae9" (UID: "246271df-1659-4521-8f1f-71d7709c0ae9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.568539 5032 generic.go:334] "Generic (PLEG): container finished" podID="246271df-1659-4521-8f1f-71d7709c0ae9" containerID="4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb" exitCode=0 Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.568580 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerDied","Data":"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb"} Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.568607 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9dn4" event={"ID":"246271df-1659-4521-8f1f-71d7709c0ae9","Type":"ContainerDied","Data":"06928a4219fac5ed3020149d236ed4643aac3238702f530d3ead8b42e144e017"} Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.568605 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s9dn4" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.568621 5032 scope.go:117] "RemoveContainer" containerID="4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.600014 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.600045 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2tsd\" (UniqueName: \"kubernetes.io/projected/246271df-1659-4521-8f1f-71d7709c0ae9-kube-api-access-m2tsd\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.600054 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/246271df-1659-4521-8f1f-71d7709c0ae9-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.604071 5032 scope.go:117] "RemoveContainer" containerID="8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.605213 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.615638 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s9dn4"] Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.634331 5032 scope.go:117] "RemoveContainer" containerID="d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.660530 5032 scope.go:117] "RemoveContainer" containerID="4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb" Jan 22 17:34:33 crc kubenswrapper[5032]: E0122 17:34:33.660935 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb\": container with ID starting with 4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb not found: ID does not exist" containerID="4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.660969 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb"} err="failed to get container status \"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb\": rpc error: code = NotFound desc = could not find container \"4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb\": container with ID starting with 4ddbf0bbd7da17c46647a2e5bc3159bc789ea3229ee64a1d8e7fb7fed4736dbb not found: ID does not exist" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.660988 5032 scope.go:117] "RemoveContainer" containerID="8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8" Jan 22 17:34:33 crc kubenswrapper[5032]: E0122 17:34:33.661358 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8\": container with ID starting with 8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8 not found: 
ID does not exist" containerID="8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.661377 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8"} err="failed to get container status \"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8\": rpc error: code = NotFound desc = could not find container \"8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8\": container with ID starting with 8fd4e2dfba88b40a6b1ae008bfe6547f636460af19d83d04be7595bb2db74fd8 not found: ID does not exist" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.661390 5032 scope.go:117] "RemoveContainer" containerID="d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26" Jan 22 17:34:33 crc kubenswrapper[5032]: E0122 17:34:33.661563 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26\": container with ID starting with d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26 not found: ID does not exist" containerID="d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26" Jan 22 17:34:33 crc kubenswrapper[5032]: I0122 17:34:33.661584 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26"} err="failed to get container status \"d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26\": rpc error: code = NotFound desc = could not find container \"d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26\": container with ID starting with d9b7c8c5864ae0e9c778a0d8913682a0cfb42c80a4c88c288b41b709cf4cee26 not found: ID does not exist" Jan 22 17:34:34 crc kubenswrapper[5032]: I0122 17:34:34.467594 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" path="/var/lib/kubelet/pods/246271df-1659-4521-8f1f-71d7709c0ae9/volumes" Jan 22 17:34:39 crc kubenswrapper[5032]: I0122 17:34:39.454208 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.453559 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.468830 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.507628 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.634097 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1"} Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.691653 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4l4v2" Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.778050 5032 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-marketplace/community-operators-4l4v2"] Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.822389 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 17:34:40 crc kubenswrapper[5032]: I0122 17:34:40.822712 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7st44" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="registry-server" containerID="cri-o://07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635" gracePeriod=2 Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.352729 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7st44" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.476219 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities\") pod \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.476454 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56smc\" (UniqueName: \"kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc\") pod \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.476514 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content\") pod \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\" (UID: \"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382\") " Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.478219 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities" (OuterVolumeSpecName: "utilities") pod "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" (UID: "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.486431 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc" (OuterVolumeSpecName: "kube-api-access-56smc") pod "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" (UID: "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382"). InnerVolumeSpecName "kube-api-access-56smc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.555635 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" (UID: "7a52f1b7-ce5b-4d2a-905b-1fdb1127b382"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.580000 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.580062 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56smc\" (UniqueName: \"kubernetes.io/projected/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-kube-api-access-56smc\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.580082 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.642847 5032 generic.go:334] "Generic (PLEG): container finished" podID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerID="07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635" exitCode=0 Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.642896 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerDied","Data":"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635"} Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.642987 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7st44" event={"ID":"7a52f1b7-ce5b-4d2a-905b-1fdb1127b382","Type":"ContainerDied","Data":"819a438297d3e18675c8ea80b7b562c786956a3b07bdae09931a6a616c5452ee"} Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.643013 5032 scope.go:117] "RemoveContainer" containerID="07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.643229 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7st44" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.675171 5032 scope.go:117] "RemoveContainer" containerID="ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.681627 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.691016 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7st44"] Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.849071 5032 scope.go:117] "RemoveContainer" containerID="18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.895408 5032 scope.go:117] "RemoveContainer" containerID="07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635" Jan 22 17:34:41 crc kubenswrapper[5032]: E0122 17:34:41.895932 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635\": container with ID starting with 07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635 not found: ID does not exist" containerID="07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.895978 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635"} err="failed to get container status \"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635\": rpc error: code = NotFound desc = could not find container \"07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635\": container with ID starting with 07512796b04d7b6c4cfadb4ba6a9a5ff741b9cb3b1e4713e5ade380ac29dd635 not found: ID does not exist" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.896123 5032 scope.go:117] "RemoveContainer" containerID="ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286" Jan 22 17:34:41 crc kubenswrapper[5032]: E0122 17:34:41.896746 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286\": container with ID starting with ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286 not found: ID does not exist" containerID="ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.896812 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286"} err="failed to get container status \"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286\": rpc error: code = NotFound desc = could not find container \"ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286\": container with ID starting with ff8fbef42e4492d93b7c4ecb423a320ca5ae607a231c2f798c135ea2df17f286 not found: ID does not exist" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.896843 5032 scope.go:117] "RemoveContainer" containerID="18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71" Jan 22 17:34:41 crc kubenswrapper[5032]: E0122 17:34:41.897224 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71\": container with ID starting with 18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71 not found: ID does not exist" containerID="18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71" Jan 22 17:34:41 crc kubenswrapper[5032]: I0122 17:34:41.897252 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71"} err="failed to get container status \"18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71\": rpc error: code = NotFound desc = could not find container \"18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71\": container with ID starting with 18b46a2b5df1216930280579e078f003b8becff462bdd40c75a7e73a17dcaf71 not found: ID does not exist" Jan 22 17:34:42 crc kubenswrapper[5032]: I0122 17:34:42.472773 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" path="/var/lib/kubelet/pods/7a52f1b7-ce5b-4d2a-905b-1fdb1127b382/volumes" Jan 22 17:37:03 crc kubenswrapper[5032]: I0122 17:37:03.017135 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:37:03 crc kubenswrapper[5032]: I0122 17:37:03.017720 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:37:33 crc kubenswrapper[5032]: I0122 17:37:33.016657 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:37:33 crc kubenswrapper[5032]: I0122 17:37:33.017586 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.017347 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.019063 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.019235 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.020114 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.020270 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1" gracePeriod=600 Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.517521 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1" exitCode=0 Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.517616 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1"} Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.518007 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d"} Jan 22 17:38:03 crc kubenswrapper[5032]: I0122 17:38:03.518058 5032 scope.go:117] "RemoveContainer" containerID="8c44f2429d5ec0ead10eb27e32eb0c144bf8c9271296627af23240eb2f69f1b5" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.723500 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727676 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="extract-utilities" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727699 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="extract-utilities" Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727717 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727727 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727748 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="extract-utilities" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727756 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="extract-utilities" Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727769 5032 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="extract-content" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727778 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="extract-content" Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727795 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727802 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: E0122 17:39:23.727815 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="extract-content" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.727823 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="extract-content" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.728099 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a52f1b7-ce5b-4d2a-905b-1fdb1127b382" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.728115 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="246271df-1659-4521-8f1f-71d7709c0ae9" containerName="registry-server" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.729733 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.744096 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.816440 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.816634 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.816723 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjqm4\" (UniqueName: \"kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.920676 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.921344 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.921509 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjqm4\" (UniqueName: \"kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.921903 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.921916 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:23 crc kubenswrapper[5032]: I0122 17:39:23.947005 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjqm4\" (UniqueName: \"kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4\") pod \"redhat-operators-4t8kc\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:24 crc kubenswrapper[5032]: I0122 17:39:24.056745 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:24 crc kubenswrapper[5032]: I0122 17:39:24.547510 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:25 crc kubenswrapper[5032]: I0122 17:39:25.351227 5032 generic.go:334] "Generic (PLEG): container finished" podID="835e7144-616a-4979-b3d0-e087caac67e4" containerID="bbae39d0e2326dfdfbfa35cd901248cffd050786ab7799c300a199aba4ff250f" exitCode=0 Jan 22 17:39:25 crc kubenswrapper[5032]: I0122 17:39:25.351301 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerDied","Data":"bbae39d0e2326dfdfbfa35cd901248cffd050786ab7799c300a199aba4ff250f"} Jan 22 17:39:25 crc kubenswrapper[5032]: I0122 17:39:25.351593 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerStarted","Data":"b112639ef40156535137d8f9705956d1e1287c76cfb45e63824f0ebb095bbc74"} Jan 22 17:39:25 crc kubenswrapper[5032]: I0122 17:39:25.353596 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:39:26 crc kubenswrapper[5032]: I0122 17:39:26.366290 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerStarted","Data":"afe1a24d3b1450c599319fa23d79256a28ad7e72da8e510ca779ce5f2f6198c6"} Jan 22 17:39:29 crc kubenswrapper[5032]: I0122 17:39:29.393400 5032 generic.go:334] "Generic (PLEG): container finished" podID="835e7144-616a-4979-b3d0-e087caac67e4" containerID="afe1a24d3b1450c599319fa23d79256a28ad7e72da8e510ca779ce5f2f6198c6" exitCode=0 Jan 22 17:39:29 crc kubenswrapper[5032]: I0122 17:39:29.393460 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerDied","Data":"afe1a24d3b1450c599319fa23d79256a28ad7e72da8e510ca779ce5f2f6198c6"} Jan 22 17:39:31 crc kubenswrapper[5032]: I0122 17:39:31.417800 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerStarted","Data":"d24781a8bf1fbeacaf22b8d714453c4d9fbb369b35aa38e44564e14653e8b40f"} Jan 22 17:39:31 crc kubenswrapper[5032]: I0122 17:39:31.452795 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4t8kc" podStartSLOduration=3.450550185 podStartE2EDuration="8.45277263s" podCreationTimestamp="2026-01-22 17:39:23 +0000 UTC" firstStartedPulling="2026-01-22 17:39:25.353304561 +0000 UTC m=+5669.165473582" lastFinishedPulling="2026-01-22 17:39:30.355526986 +0000 UTC m=+5674.167696027" observedRunningTime="2026-01-22 17:39:31.437938335 +0000 UTC m=+5675.250107386" watchObservedRunningTime="2026-01-22 17:39:31.45277263 +0000 UTC m=+5675.264941661" Jan 22 17:39:34 crc kubenswrapper[5032]: I0122 17:39:34.057805 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:34 crc kubenswrapper[5032]: I0122 17:39:34.058373 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:35 crc 
kubenswrapper[5032]: I0122 17:39:35.109139 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4t8kc" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="registry-server" probeResult="failure" output=< Jan 22 17:39:35 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 17:39:35 crc kubenswrapper[5032]: > Jan 22 17:39:44 crc kubenswrapper[5032]: I0122 17:39:44.105633 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:44 crc kubenswrapper[5032]: I0122 17:39:44.158995 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:44 crc kubenswrapper[5032]: I0122 17:39:44.338246 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:45 crc kubenswrapper[5032]: I0122 17:39:45.535931 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4t8kc" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="registry-server" containerID="cri-o://d24781a8bf1fbeacaf22b8d714453c4d9fbb369b35aa38e44564e14653e8b40f" gracePeriod=2 Jan 22 17:39:46 crc kubenswrapper[5032]: I0122 17:39:46.568925 5032 generic.go:334] "Generic (PLEG): container finished" podID="835e7144-616a-4979-b3d0-e087caac67e4" containerID="d24781a8bf1fbeacaf22b8d714453c4d9fbb369b35aa38e44564e14653e8b40f" exitCode=0 Jan 22 17:39:46 crc kubenswrapper[5032]: I0122 17:39:46.568975 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerDied","Data":"d24781a8bf1fbeacaf22b8d714453c4d9fbb369b35aa38e44564e14653e8b40f"} Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.468735 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.556355 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjqm4\" (UniqueName: \"kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4\") pod \"835e7144-616a-4979-b3d0-e087caac67e4\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.556933 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content\") pod \"835e7144-616a-4979-b3d0-e087caac67e4\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.557016 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities\") pod \"835e7144-616a-4979-b3d0-e087caac67e4\" (UID: \"835e7144-616a-4979-b3d0-e087caac67e4\") " Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.558257 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities" (OuterVolumeSpecName: "utilities") pod "835e7144-616a-4979-b3d0-e087caac67e4" (UID: "835e7144-616a-4979-b3d0-e087caac67e4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.558870 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.562374 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4" (OuterVolumeSpecName: "kube-api-access-xjqm4") pod "835e7144-616a-4979-b3d0-e087caac67e4" (UID: "835e7144-616a-4979-b3d0-e087caac67e4"). InnerVolumeSpecName "kube-api-access-xjqm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.578562 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4t8kc" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.578440 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4t8kc" event={"ID":"835e7144-616a-4979-b3d0-e087caac67e4","Type":"ContainerDied","Data":"b112639ef40156535137d8f9705956d1e1287c76cfb45e63824f0ebb095bbc74"} Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.580028 5032 scope.go:117] "RemoveContainer" containerID="d24781a8bf1fbeacaf22b8d714453c4d9fbb369b35aa38e44564e14653e8b40f" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.647921 5032 scope.go:117] "RemoveContainer" containerID="afe1a24d3b1450c599319fa23d79256a28ad7e72da8e510ca779ce5f2f6198c6" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.661561 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjqm4\" (UniqueName: \"kubernetes.io/projected/835e7144-616a-4979-b3d0-e087caac67e4-kube-api-access-xjqm4\") on node \"crc\" DevicePath \"\"" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.672686 5032 scope.go:117] "RemoveContainer" containerID="bbae39d0e2326dfdfbfa35cd901248cffd050786ab7799c300a199aba4ff250f" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.681403 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "835e7144-616a-4979-b3d0-e087caac67e4" (UID: "835e7144-616a-4979-b3d0-e087caac67e4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.762488 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/835e7144-616a-4979-b3d0-e087caac67e4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.914277 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:47 crc kubenswrapper[5032]: I0122 17:39:47.925808 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4t8kc"] Jan 22 17:39:48 crc kubenswrapper[5032]: I0122 17:39:48.466135 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="835e7144-616a-4979-b3d0-e087caac67e4" path="/var/lib/kubelet/pods/835e7144-616a-4979-b3d0-e087caac67e4/volumes" Jan 22 17:40:03 crc kubenswrapper[5032]: I0122 17:40:03.016499 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:40:03 crc kubenswrapper[5032]: I0122 17:40:03.017123 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.615475 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:05 crc kubenswrapper[5032]: E0122 17:40:05.616432 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="registry-server" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.616454 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="registry-server" Jan 22 17:40:05 crc kubenswrapper[5032]: E0122 17:40:05.616496 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="extract-utilities" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.616508 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="extract-utilities" Jan 22 17:40:05 crc kubenswrapper[5032]: E0122 17:40:05.616541 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="extract-content" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.616552 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="extract-content" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.616846 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="835e7144-616a-4979-b3d0-e087caac67e4" containerName="registry-server" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.618882 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.637383 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.722984 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.723055 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swcgx\" (UniqueName: \"kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.723105 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.824317 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swcgx\" (UniqueName: \"kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.824383 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.824522 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.824982 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.825118 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.843046 5032 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-swcgx\" (UniqueName: \"kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx\") pod \"redhat-marketplace-mj2rk\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:05 crc kubenswrapper[5032]: I0122 17:40:05.941497 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:06 crc kubenswrapper[5032]: I0122 17:40:06.433495 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:06 crc kubenswrapper[5032]: I0122 17:40:06.781018 5032 generic.go:334] "Generic (PLEG): container finished" podID="4587f03a-c140-4a5b-a4fd-274544074711" containerID="225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484" exitCode=0 Jan 22 17:40:06 crc kubenswrapper[5032]: I0122 17:40:06.781153 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerDied","Data":"225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484"} Jan 22 17:40:06 crc kubenswrapper[5032]: I0122 17:40:06.781451 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerStarted","Data":"6486fbf29ac0d1173c2134d5d5b959361bfea14c2ceaa3888cb6f8fb4a2016ea"} Jan 22 17:40:07 crc kubenswrapper[5032]: I0122 17:40:07.791629 5032 generic.go:334] "Generic (PLEG): container finished" podID="4587f03a-c140-4a5b-a4fd-274544074711" containerID="4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04" exitCode=0 Jan 22 17:40:07 crc kubenswrapper[5032]: I0122 17:40:07.791736 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerDied","Data":"4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04"} Jan 22 17:40:08 crc kubenswrapper[5032]: I0122 17:40:08.804272 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerStarted","Data":"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4"} Jan 22 17:40:08 crc kubenswrapper[5032]: I0122 17:40:08.825574 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mj2rk" podStartSLOduration=2.346785505 podStartE2EDuration="3.825556763s" podCreationTimestamp="2026-01-22 17:40:05 +0000 UTC" firstStartedPulling="2026-01-22 17:40:06.784379707 +0000 UTC m=+5710.596548738" lastFinishedPulling="2026-01-22 17:40:08.263150965 +0000 UTC m=+5712.075319996" observedRunningTime="2026-01-22 17:40:08.822704527 +0000 UTC m=+5712.634873578" watchObservedRunningTime="2026-01-22 17:40:08.825556763 +0000 UTC m=+5712.637725794" Jan 22 17:40:15 crc kubenswrapper[5032]: I0122 17:40:15.942652 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:15 crc kubenswrapper[5032]: I0122 17:40:15.943354 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:15 crc kubenswrapper[5032]: I0122 17:40:15.998952 5032 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:16 crc kubenswrapper[5032]: I0122 17:40:16.930674 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:16 crc kubenswrapper[5032]: I0122 17:40:16.985165 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:18 crc kubenswrapper[5032]: I0122 17:40:18.890884 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mj2rk" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="registry-server" containerID="cri-o://32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4" gracePeriod=2 Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.716327 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.801003 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swcgx\" (UniqueName: \"kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx\") pod \"4587f03a-c140-4a5b-a4fd-274544074711\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.801227 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content\") pod \"4587f03a-c140-4a5b-a4fd-274544074711\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.801255 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities\") pod \"4587f03a-c140-4a5b-a4fd-274544074711\" (UID: \"4587f03a-c140-4a5b-a4fd-274544074711\") " Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.802109 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities" (OuterVolumeSpecName: "utilities") pod "4587f03a-c140-4a5b-a4fd-274544074711" (UID: "4587f03a-c140-4a5b-a4fd-274544074711"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.806894 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx" (OuterVolumeSpecName: "kube-api-access-swcgx") pod "4587f03a-c140-4a5b-a4fd-274544074711" (UID: "4587f03a-c140-4a5b-a4fd-274544074711"). InnerVolumeSpecName "kube-api-access-swcgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.825567 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4587f03a-c140-4a5b-a4fd-274544074711" (UID: "4587f03a-c140-4a5b-a4fd-274544074711"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.902829 5032 generic.go:334] "Generic (PLEG): container finished" podID="4587f03a-c140-4a5b-a4fd-274544074711" containerID="32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4" exitCode=0 Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.902896 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerDied","Data":"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4"} Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.902936 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mj2rk" event={"ID":"4587f03a-c140-4a5b-a4fd-274544074711","Type":"ContainerDied","Data":"6486fbf29ac0d1173c2134d5d5b959361bfea14c2ceaa3888cb6f8fb4a2016ea"} Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.902956 5032 scope.go:117] "RemoveContainer" containerID="32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.903153 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.903181 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4587f03a-c140-4a5b-a4fd-274544074711-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.903191 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swcgx\" (UniqueName: \"kubernetes.io/projected/4587f03a-c140-4a5b-a4fd-274544074711-kube-api-access-swcgx\") on node \"crc\" DevicePath \"\"" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.903160 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mj2rk" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.948359 5032 scope.go:117] "RemoveContainer" containerID="4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04" Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.964803 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.980576 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mj2rk"] Jan 22 17:40:19 crc kubenswrapper[5032]: I0122 17:40:19.999289 5032 scope.go:117] "RemoveContainer" containerID="225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.040310 5032 scope.go:117] "RemoveContainer" containerID="32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4" Jan 22 17:40:20 crc kubenswrapper[5032]: E0122 17:40:20.041169 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4\": container with ID starting with 32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4 not found: ID does not exist" containerID="32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.041219 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4"} err="failed to get container status \"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4\": rpc error: code = NotFound desc = could not find container \"32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4\": container with ID starting with 32dcc9cd34ed3ac8855308ae8be4bb9ade8c251cc3ff9b538789ac03bdd818b4 not found: ID does not exist" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.041252 5032 scope.go:117] "RemoveContainer" containerID="4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04" Jan 22 17:40:20 crc kubenswrapper[5032]: E0122 17:40:20.041537 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04\": container with ID starting with 4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04 not found: ID does not exist" containerID="4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.041561 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04"} err="failed to get container status \"4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04\": rpc error: code = NotFound desc = could not find container \"4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04\": container with ID starting with 4cc4668f86ccee8ee59c9eca284174b9055fc6c4024cca69f78a6cf5e125dd04 not found: ID does not exist" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.041577 5032 scope.go:117] "RemoveContainer" containerID="225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484" Jan 22 17:40:20 crc kubenswrapper[5032]: E0122 17:40:20.041800 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484\": container with ID starting with 225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484 not found: ID does not exist" containerID="225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.041826 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484"} err="failed to get container status \"225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484\": rpc error: code = NotFound desc = could not find container \"225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484\": container with ID starting with 225a00db9cfcf41393ab794dd27ce05e37eb1f821c34acc2391a2a290b0d2484 not found: ID does not exist" Jan 22 17:40:20 crc kubenswrapper[5032]: I0122 17:40:20.471578 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4587f03a-c140-4a5b-a4fd-274544074711" path="/var/lib/kubelet/pods/4587f03a-c140-4a5b-a4fd-274544074711/volumes" Jan 22 17:40:33 crc kubenswrapper[5032]: I0122 17:40:33.016727 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:40:33 crc kubenswrapper[5032]: I0122 17:40:33.017507 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:41:03 crc kubenswrapper[5032]: I0122 17:41:03.016754 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:41:03 crc kubenswrapper[5032]: I0122 17:41:03.017325 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:41:03 crc kubenswrapper[5032]: I0122 17:41:03.017367 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:41:03 crc kubenswrapper[5032]: I0122 17:41:03.018124 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:41:03 crc kubenswrapper[5032]: I0122 17:41:03.018167 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" gracePeriod=600 Jan 22 17:41:03 crc kubenswrapper[5032]: E0122 17:41:03.773557 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:41:04 crc kubenswrapper[5032]: I0122 17:41:04.323046 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" exitCode=0 Jan 22 17:41:04 crc kubenswrapper[5032]: I0122 17:41:04.323124 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d"} Jan 22 17:41:04 crc kubenswrapper[5032]: I0122 17:41:04.323394 5032 scope.go:117] "RemoveContainer" containerID="f39db55c10b9a8d1741b4c30249a2edc50e6c4dd1e76a1c0ed6f4e6ecba7bde1" Jan 22 17:41:04 crc kubenswrapper[5032]: I0122 17:41:04.324139 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:41:04 crc kubenswrapper[5032]: E0122 17:41:04.324412 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:41:19 crc kubenswrapper[5032]: I0122 17:41:19.455379 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:41:19 crc kubenswrapper[5032]: E0122 17:41:19.456921 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:41:31 crc kubenswrapper[5032]: I0122 17:41:31.454441 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:41:31 crc kubenswrapper[5032]: E0122 17:41:31.455256 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:41:44 crc kubenswrapper[5032]: I0122 17:41:44.454993 5032 scope.go:117] 
"RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:41:44 crc kubenswrapper[5032]: E0122 17:41:44.455819 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:41:56 crc kubenswrapper[5032]: I0122 17:41:56.466127 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:41:56 crc kubenswrapper[5032]: E0122 17:41:56.467432 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:42:09 crc kubenswrapper[5032]: I0122 17:42:09.454490 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:42:09 crc kubenswrapper[5032]: E0122 17:42:09.455239 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:42:24 crc kubenswrapper[5032]: I0122 17:42:24.455169 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:42:24 crc kubenswrapper[5032]: E0122 17:42:24.455751 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:42:37 crc kubenswrapper[5032]: I0122 17:42:37.455214 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:42:37 crc kubenswrapper[5032]: E0122 17:42:37.456108 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:42:49 crc kubenswrapper[5032]: I0122 17:42:49.455350 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:42:49 crc kubenswrapper[5032]: E0122 17:42:49.456148 5032 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:43:03 crc kubenswrapper[5032]: I0122 17:43:03.454760 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:43:03 crc kubenswrapper[5032]: E0122 17:43:03.455683 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:43:14 crc kubenswrapper[5032]: I0122 17:43:14.454462 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:43:14 crc kubenswrapper[5032]: E0122 17:43:14.455352 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:43:26 crc kubenswrapper[5032]: I0122 17:43:26.462383 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:43:26 crc kubenswrapper[5032]: E0122 17:43:26.463586 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:43:37 crc kubenswrapper[5032]: I0122 17:43:37.455000 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:43:37 crc kubenswrapper[5032]: E0122 17:43:37.456335 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:43:50 crc kubenswrapper[5032]: I0122 17:43:50.454814 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:43:50 crc kubenswrapper[5032]: E0122 17:43:50.455851 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:44:02 crc kubenswrapper[5032]: I0122 17:44:02.454706 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:44:02 crc kubenswrapper[5032]: E0122 17:44:02.455350 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:44:13 crc kubenswrapper[5032]: I0122 17:44:13.455169 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:44:13 crc kubenswrapper[5032]: E0122 17:44:13.455893 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:44:28 crc kubenswrapper[5032]: I0122 17:44:28.455287 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:44:28 crc kubenswrapper[5032]: E0122 17:44:28.456192 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.087102 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:32 crc kubenswrapper[5032]: E0122 17:44:32.088701 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="extract-utilities" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.088760 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="extract-utilities" Jan 22 17:44:32 crc kubenswrapper[5032]: E0122 17:44:32.088806 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="extract-content" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.088824 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="extract-content" Jan 22 17:44:32 crc kubenswrapper[5032]: E0122 17:44:32.088911 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="registry-server" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.088931 5032 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="registry-server" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.089426 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4587f03a-c140-4a5b-a4fd-274544074711" containerName="registry-server" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.092708 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.117646 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.118024 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.118084 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f544w\" (UniqueName: \"kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.118189 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.219528 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.219689 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f544w\" (UniqueName: \"kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.219719 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.220005 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.220156 5032 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.251339 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f544w\" (UniqueName: \"kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w\") pod \"community-operators-tf7gh\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.427768 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:32 crc kubenswrapper[5032]: I0122 17:44:32.934757 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:32 crc kubenswrapper[5032]: W0122 17:44:32.938429 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8df6a30f_17ad_4b4d_8b60_4bbe208d7cf5.slice/crio-ffd80956ae193c22d79a94ff805bdbca765c849dcfc93b8546419fa48ab7c7bf WatchSource:0}: Error finding container ffd80956ae193c22d79a94ff805bdbca765c849dcfc93b8546419fa48ab7c7bf: Status 404 returned error can't find the container with id ffd80956ae193c22d79a94ff805bdbca765c849dcfc93b8546419fa48ab7c7bf Jan 22 17:44:33 crc kubenswrapper[5032]: I0122 17:44:33.178191 5032 generic.go:334] "Generic (PLEG): container finished" podID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerID="9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d" exitCode=0 Jan 22 17:44:33 crc kubenswrapper[5032]: I0122 17:44:33.178234 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerDied","Data":"9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d"} Jan 22 17:44:33 crc kubenswrapper[5032]: I0122 17:44:33.178269 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerStarted","Data":"ffd80956ae193c22d79a94ff805bdbca765c849dcfc93b8546419fa48ab7c7bf"} Jan 22 17:44:33 crc kubenswrapper[5032]: I0122 17:44:33.180035 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:44:34 crc kubenswrapper[5032]: I0122 17:44:34.190952 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerStarted","Data":"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4"} Jan 22 17:44:35 crc kubenswrapper[5032]: I0122 17:44:35.206039 5032 generic.go:334] "Generic (PLEG): container finished" podID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerID="7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4" exitCode=0 Jan 22 17:44:35 crc kubenswrapper[5032]: I0122 17:44:35.206111 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" 
event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerDied","Data":"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4"} Jan 22 17:44:36 crc kubenswrapper[5032]: I0122 17:44:36.215609 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerStarted","Data":"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7"} Jan 22 17:44:36 crc kubenswrapper[5032]: I0122 17:44:36.240626 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tf7gh" podStartSLOduration=1.796017908 podStartE2EDuration="4.240605721s" podCreationTimestamp="2026-01-22 17:44:32 +0000 UTC" firstStartedPulling="2026-01-22 17:44:33.179804967 +0000 UTC m=+5976.991973998" lastFinishedPulling="2026-01-22 17:44:35.62439278 +0000 UTC m=+5979.436561811" observedRunningTime="2026-01-22 17:44:36.231640442 +0000 UTC m=+5980.043809483" watchObservedRunningTime="2026-01-22 17:44:36.240605721 +0000 UTC m=+5980.052774772" Jan 22 17:44:39 crc kubenswrapper[5032]: I0122 17:44:39.455263 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:44:39 crc kubenswrapper[5032]: E0122 17:44:39.456025 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:44:42 crc kubenswrapper[5032]: I0122 17:44:42.428397 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:42 crc kubenswrapper[5032]: I0122 17:44:42.429638 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:42 crc kubenswrapper[5032]: I0122 17:44:42.501391 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:43 crc kubenswrapper[5032]: I0122 17:44:43.330341 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:43 crc kubenswrapper[5032]: I0122 17:44:43.379680 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.306109 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tf7gh" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="registry-server" containerID="cri-o://75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7" gracePeriod=2 Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.800255 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.877293 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content\") pod \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.877445 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities\") pod \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.877618 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f544w\" (UniqueName: \"kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w\") pod \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\" (UID: \"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5\") " Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.878422 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities" (OuterVolumeSpecName: "utilities") pod "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" (UID: "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.884938 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w" (OuterVolumeSpecName: "kube-api-access-f544w") pod "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" (UID: "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5"). InnerVolumeSpecName "kube-api-access-f544w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.931957 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" (UID: "8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.979722 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f544w\" (UniqueName: \"kubernetes.io/projected/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-kube-api-access-f544w\") on node \"crc\" DevicePath \"\"" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.979754 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:44:45 crc kubenswrapper[5032]: I0122 17:44:45.979764 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.320149 5032 generic.go:334] "Generic (PLEG): container finished" podID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerID="75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7" exitCode=0 Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.320214 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tf7gh" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.320218 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerDied","Data":"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7"} Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.320305 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tf7gh" event={"ID":"8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5","Type":"ContainerDied","Data":"ffd80956ae193c22d79a94ff805bdbca765c849dcfc93b8546419fa48ab7c7bf"} Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.320351 5032 scope.go:117] "RemoveContainer" containerID="75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.357047 5032 scope.go:117] "RemoveContainer" containerID="7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.369448 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.379700 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tf7gh"] Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.390390 5032 scope.go:117] "RemoveContainer" containerID="9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.445777 5032 scope.go:117] "RemoveContainer" containerID="75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7" Jan 22 17:44:46 crc kubenswrapper[5032]: E0122 17:44:46.446231 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7\": container with ID starting with 75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7 not found: ID does not exist" containerID="75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.446304 
5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7"} err="failed to get container status \"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7\": rpc error: code = NotFound desc = could not find container \"75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7\": container with ID starting with 75838b3ff3c7cae437ee6cce27f834089e804655dc8524d379747dd96fafa6b7 not found: ID does not exist" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.446339 5032 scope.go:117] "RemoveContainer" containerID="7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4" Jan 22 17:44:46 crc kubenswrapper[5032]: E0122 17:44:46.446684 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4\": container with ID starting with 7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4 not found: ID does not exist" containerID="7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.446743 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4"} err="failed to get container status \"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4\": rpc error: code = NotFound desc = could not find container \"7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4\": container with ID starting with 7c6a9336069179a0cdec936d3a19e6c47746b9706789896cbd712110d82551b4 not found: ID does not exist" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.446778 5032 scope.go:117] "RemoveContainer" containerID="9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d" Jan 22 17:44:46 crc kubenswrapper[5032]: E0122 17:44:46.447480 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d\": container with ID starting with 9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d not found: ID does not exist" containerID="9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.447514 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d"} err="failed to get container status \"9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d\": rpc error: code = NotFound desc = could not find container \"9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d\": container with ID starting with 9ce368b44ad50d34f498b5b64b2b682cfb9952ba49e20b66026bf49624cdd79d not found: ID does not exist" Jan 22 17:44:46 crc kubenswrapper[5032]: I0122 17:44:46.465748 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" path="/var/lib/kubelet/pods/8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5/volumes" Jan 22 17:44:54 crc kubenswrapper[5032]: I0122 17:44:54.455240 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:44:54 crc kubenswrapper[5032]: E0122 17:44:54.456480 5032 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.157621 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5"] Jan 22 17:45:00 crc kubenswrapper[5032]: E0122 17:45:00.158642 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="registry-server" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.158658 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="registry-server" Jan 22 17:45:00 crc kubenswrapper[5032]: E0122 17:45:00.158688 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="extract-content" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.158694 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="extract-content" Jan 22 17:45:00 crc kubenswrapper[5032]: E0122 17:45:00.158706 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="extract-utilities" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.158711 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="extract-utilities" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.158920 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="8df6a30f-17ad-4b4d-8b60-4bbe208d7cf5" containerName="registry-server" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.159635 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.162591 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.162965 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.179260 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5"] Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.258356 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.258437 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxq4c\" (UniqueName: \"kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.258803 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.360173 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxq4c\" (UniqueName: \"kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.360312 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.360376 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.361322 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume\") pod 
\"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.367591 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.380771 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxq4c\" (UniqueName: \"kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c\") pod \"collect-profiles-29485065-767l5\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.489366 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:00 crc kubenswrapper[5032]: I0122 17:45:00.965309 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5"] Jan 22 17:45:01 crc kubenswrapper[5032]: I0122 17:45:01.458207 5032 generic.go:334] "Generic (PLEG): container finished" podID="01c2584a-e857-42a7-b92b-00a88735324a" containerID="e7d8279e08a1bcd03d2e5e3a9ec60ef7d6e632bc721247802187119087ee19a7" exitCode=0 Jan 22 17:45:01 crc kubenswrapper[5032]: I0122 17:45:01.458241 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" event={"ID":"01c2584a-e857-42a7-b92b-00a88735324a","Type":"ContainerDied","Data":"e7d8279e08a1bcd03d2e5e3a9ec60ef7d6e632bc721247802187119087ee19a7"} Jan 22 17:45:01 crc kubenswrapper[5032]: I0122 17:45:01.458503 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" event={"ID":"01c2584a-e857-42a7-b92b-00a88735324a","Type":"ContainerStarted","Data":"89c59f6987ac3f461ed847d4718bd6f651f2d6fc188a7b0c99fd4ea22938ae6f"} Jan 22 17:45:02 crc kubenswrapper[5032]: I0122 17:45:02.913973 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.019825 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume\") pod \"01c2584a-e857-42a7-b92b-00a88735324a\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.019980 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume\") pod \"01c2584a-e857-42a7-b92b-00a88735324a\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.020081 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxq4c\" (UniqueName: \"kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c\") pod \"01c2584a-e857-42a7-b92b-00a88735324a\" (UID: \"01c2584a-e857-42a7-b92b-00a88735324a\") " Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.020410 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume" (OuterVolumeSpecName: "config-volume") pod "01c2584a-e857-42a7-b92b-00a88735324a" (UID: "01c2584a-e857-42a7-b92b-00a88735324a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.020832 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01c2584a-e857-42a7-b92b-00a88735324a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.026095 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "01c2584a-e857-42a7-b92b-00a88735324a" (UID: "01c2584a-e857-42a7-b92b-00a88735324a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.026152 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c" (OuterVolumeSpecName: "kube-api-access-rxq4c") pod "01c2584a-e857-42a7-b92b-00a88735324a" (UID: "01c2584a-e857-42a7-b92b-00a88735324a"). InnerVolumeSpecName "kube-api-access-rxq4c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.123108 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01c2584a-e857-42a7-b92b-00a88735324a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.123147 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxq4c\" (UniqueName: \"kubernetes.io/projected/01c2584a-e857-42a7-b92b-00a88735324a-kube-api-access-rxq4c\") on node \"crc\" DevicePath \"\"" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.478068 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" event={"ID":"01c2584a-e857-42a7-b92b-00a88735324a","Type":"ContainerDied","Data":"89c59f6987ac3f461ed847d4718bd6f651f2d6fc188a7b0c99fd4ea22938ae6f"} Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.478429 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89c59f6987ac3f461ed847d4718bd6f651f2d6fc188a7b0c99fd4ea22938ae6f" Jan 22 17:45:03 crc kubenswrapper[5032]: I0122 17:45:03.478146 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485065-767l5" Jan 22 17:45:04 crc kubenswrapper[5032]: I0122 17:45:04.000665 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t"] Jan 22 17:45:04 crc kubenswrapper[5032]: I0122 17:45:04.010688 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485020-wwr9t"] Jan 22 17:45:04 crc kubenswrapper[5032]: I0122 17:45:04.470005 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf468fca-eadf-4bae-8d37-1d287a5f2a25" path="/var/lib/kubelet/pods/cf468fca-eadf-4bae-8d37-1d287a5f2a25/volumes" Jan 22 17:45:07 crc kubenswrapper[5032]: I0122 17:45:07.455417 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:45:07 crc kubenswrapper[5032]: E0122 17:45:07.456273 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:45:18 crc kubenswrapper[5032]: I0122 17:45:18.454814 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:45:18 crc kubenswrapper[5032]: E0122 17:45:18.455645 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:45:21 crc kubenswrapper[5032]: I0122 17:45:21.512400 5032 scope.go:117] "RemoveContainer" containerID="e91408ecbf588fe43b0683b70ca6b7f5a125faa7e9387cb7f5fd19bf5baf8595" Jan 22 
17:45:31 crc kubenswrapper[5032]: I0122 17:45:31.454579 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:45:31 crc kubenswrapper[5032]: E0122 17:45:31.455291 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:45:46 crc kubenswrapper[5032]: I0122 17:45:46.461308 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:45:46 crc kubenswrapper[5032]: E0122 17:45:46.462109 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:45:58 crc kubenswrapper[5032]: I0122 17:45:58.454784 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:45:58 crc kubenswrapper[5032]: E0122 17:45:58.455762 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:46:12 crc kubenswrapper[5032]: I0122 17:46:12.455813 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:46:13 crc kubenswrapper[5032]: I0122 17:46:13.127088 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7"} Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.000480 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n29hj"] Jan 22 17:46:22 crc kubenswrapper[5032]: E0122 17:46:22.001631 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01c2584a-e857-42a7-b92b-00a88735324a" containerName="collect-profiles" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.001652 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="01c2584a-e857-42a7-b92b-00a88735324a" containerName="collect-profiles" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.001975 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="01c2584a-e857-42a7-b92b-00a88735324a" containerName="collect-profiles" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.003653 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.015701 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n29hj"] Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.033523 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x28ht\" (UniqueName: \"kubernetes.io/projected/a66a029a-23a5-4b4c-89b4-bfa344805527-kube-api-access-x28ht\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.033612 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-utilities\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.033675 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-catalog-content\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.134930 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-utilities\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.135032 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-catalog-content\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.135112 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x28ht\" (UniqueName: \"kubernetes.io/projected/a66a029a-23a5-4b4c-89b4-bfa344805527-kube-api-access-x28ht\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.135479 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-utilities\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.135741 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66a029a-23a5-4b4c-89b4-bfa344805527-catalog-content\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.159279 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x28ht\" (UniqueName: \"kubernetes.io/projected/a66a029a-23a5-4b4c-89b4-bfa344805527-kube-api-access-x28ht\") pod \"certified-operators-n29hj\" (UID: \"a66a029a-23a5-4b4c-89b4-bfa344805527\") " pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.326211 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:22 crc kubenswrapper[5032]: I0122 17:46:22.807667 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n29hj"] Jan 22 17:46:23 crc kubenswrapper[5032]: I0122 17:46:23.253878 5032 generic.go:334] "Generic (PLEG): container finished" podID="a66a029a-23a5-4b4c-89b4-bfa344805527" containerID="2da0567bb40888b8ecba6eb8e098879c7219a46b7ea17325b3cae3cec1573e4c" exitCode=0 Jan 22 17:46:23 crc kubenswrapper[5032]: I0122 17:46:23.253923 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n29hj" event={"ID":"a66a029a-23a5-4b4c-89b4-bfa344805527","Type":"ContainerDied","Data":"2da0567bb40888b8ecba6eb8e098879c7219a46b7ea17325b3cae3cec1573e4c"} Jan 22 17:46:23 crc kubenswrapper[5032]: I0122 17:46:23.253949 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n29hj" event={"ID":"a66a029a-23a5-4b4c-89b4-bfa344805527","Type":"ContainerStarted","Data":"a0a1efc85bf5bfa325599e924ae715d3728789698d5fcdcf6e79ad4a7ee42d9b"} Jan 22 17:46:27 crc kubenswrapper[5032]: I0122 17:46:27.329739 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n29hj" event={"ID":"a66a029a-23a5-4b4c-89b4-bfa344805527","Type":"ContainerStarted","Data":"b7888bc84e4f4b135b8574819205b8f8d3704245adeb1482da9c9547d10ed856"} Jan 22 17:46:28 crc kubenswrapper[5032]: I0122 17:46:28.342569 5032 generic.go:334] "Generic (PLEG): container finished" podID="a66a029a-23a5-4b4c-89b4-bfa344805527" containerID="b7888bc84e4f4b135b8574819205b8f8d3704245adeb1482da9c9547d10ed856" exitCode=0 Jan 22 17:46:28 crc kubenswrapper[5032]: I0122 17:46:28.342617 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n29hj" event={"ID":"a66a029a-23a5-4b4c-89b4-bfa344805527","Type":"ContainerDied","Data":"b7888bc84e4f4b135b8574819205b8f8d3704245adeb1482da9c9547d10ed856"} Jan 22 17:46:29 crc kubenswrapper[5032]: I0122 17:46:29.352485 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n29hj" event={"ID":"a66a029a-23a5-4b4c-89b4-bfa344805527","Type":"ContainerStarted","Data":"68005aca4b8b17c3f17c07545c6ed8c0b39963c525b244aa79c73936e0e8f210"} Jan 22 17:46:29 crc kubenswrapper[5032]: I0122 17:46:29.374646 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n29hj" podStartSLOduration=2.850245623 podStartE2EDuration="8.374627329s" podCreationTimestamp="2026-01-22 17:46:21 +0000 UTC" firstStartedPulling="2026-01-22 17:46:23.255541556 +0000 UTC m=+6087.067710587" lastFinishedPulling="2026-01-22 17:46:28.779923262 +0000 UTC m=+6092.592092293" observedRunningTime="2026-01-22 17:46:29.372314927 +0000 UTC m=+6093.184483978" watchObservedRunningTime="2026-01-22 17:46:29.374627329 +0000 UTC m=+6093.186796360" Jan 22 17:46:32 crc kubenswrapper[5032]: I0122 17:46:32.327197 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:32 crc kubenswrapper[5032]: I0122 17:46:32.327780 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:32 crc kubenswrapper[5032]: I0122 17:46:32.372879 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:42 crc kubenswrapper[5032]: I0122 17:46:42.376623 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n29hj" Jan 22 17:46:42 crc kubenswrapper[5032]: I0122 17:46:42.470824 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n29hj"] Jan 22 17:46:42 crc kubenswrapper[5032]: I0122 17:46:42.534242 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:46:42 crc kubenswrapper[5032]: I0122 17:46:42.534488 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lpfnw" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="registry-server" containerID="cri-o://2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0" gracePeriod=2 Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.453117 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.480613 5032 generic.go:334] "Generic (PLEG): container finished" podID="63ac7df3-5723-464e-af5d-873703270818" containerID="2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0" exitCode=0 Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.480653 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerDied","Data":"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0"} Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.480682 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpfnw" event={"ID":"63ac7df3-5723-464e-af5d-873703270818","Type":"ContainerDied","Data":"8dd2ba8d3d4f668b46ae1a9581616003469d9337487547eb46c8c8e560893087"} Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.480687 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lpfnw" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.480701 5032 scope.go:117] "RemoveContainer" containerID="2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.516027 5032 scope.go:117] "RemoveContainer" containerID="94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.537794 5032 scope.go:117] "RemoveContainer" containerID="7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.585602 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content\") pod \"63ac7df3-5723-464e-af5d-873703270818\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.585980 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities\") pod \"63ac7df3-5723-464e-af5d-873703270818\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.586100 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmv2t\" (UniqueName: \"kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t\") pod \"63ac7df3-5723-464e-af5d-873703270818\" (UID: \"63ac7df3-5723-464e-af5d-873703270818\") " Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.586895 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities" (OuterVolumeSpecName: "utilities") pod "63ac7df3-5723-464e-af5d-873703270818" (UID: "63ac7df3-5723-464e-af5d-873703270818"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.587967 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.598353 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t" (OuterVolumeSpecName: "kube-api-access-rmv2t") pod "63ac7df3-5723-464e-af5d-873703270818" (UID: "63ac7df3-5723-464e-af5d-873703270818"). InnerVolumeSpecName "kube-api-access-rmv2t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.603676 5032 scope.go:117] "RemoveContainer" containerID="2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0" Jan 22 17:46:43 crc kubenswrapper[5032]: E0122 17:46:43.605360 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0\": container with ID starting with 2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0 not found: ID does not exist" containerID="2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.605414 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0"} err="failed to get container status \"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0\": rpc error: code = NotFound desc = could not find container \"2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0\": container with ID starting with 2664039abc798a14811f41c704554f907532db0dc3e7b9c5ffd9fa3a956341d0 not found: ID does not exist" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.605450 5032 scope.go:117] "RemoveContainer" containerID="94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b" Jan 22 17:46:43 crc kubenswrapper[5032]: E0122 17:46:43.609895 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b\": container with ID starting with 94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b not found: ID does not exist" containerID="94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.609933 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b"} err="failed to get container status \"94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b\": rpc error: code = NotFound desc = could not find container \"94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b\": container with ID starting with 94cdce77c8f6e11511dbfd16e162b376a78ea0ce348fe9f517c8b98a9b0bbf8b not found: ID does not exist" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.609957 5032 scope.go:117] "RemoveContainer" containerID="7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420" Jan 22 17:46:43 crc kubenswrapper[5032]: E0122 17:46:43.610518 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420\": container with ID starting with 7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420 not found: ID does not exist" containerID="7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.610542 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420"} err="failed to get container status \"7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420\": rpc error: code = NotFound desc = could not 
find container \"7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420\": container with ID starting with 7f102349fc5f5d131141ca20acb1d4c60cefedaa89651f66b673b86d4cd7e420 not found: ID does not exist" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.654368 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63ac7df3-5723-464e-af5d-873703270818" (UID: "63ac7df3-5723-464e-af5d-873703270818"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.689624 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmv2t\" (UniqueName: \"kubernetes.io/projected/63ac7df3-5723-464e-af5d-873703270818-kube-api-access-rmv2t\") on node \"crc\" DevicePath \"\"" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.689662 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63ac7df3-5723-464e-af5d-873703270818-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.820572 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:46:43 crc kubenswrapper[5032]: I0122 17:46:43.828067 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lpfnw"] Jan 22 17:46:44 crc kubenswrapper[5032]: I0122 17:46:44.472139 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63ac7df3-5723-464e-af5d-873703270818" path="/var/lib/kubelet/pods/63ac7df3-5723-464e-af5d-873703270818/volumes" Jan 22 17:48:33 crc kubenswrapper[5032]: I0122 17:48:33.017011 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:48:33 crc kubenswrapper[5032]: I0122 17:48:33.017617 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:49:03 crc kubenswrapper[5032]: I0122 17:49:03.017173 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:49:03 crc kubenswrapper[5032]: I0122 17:49:03.017721 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.016506 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.017116 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.017162 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.018166 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.018232 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7" gracePeriod=600 Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.175760 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7" exitCode=0 Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.175813 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7"} Jan 22 17:49:33 crc kubenswrapper[5032]: I0122 17:49:33.176206 5032 scope.go:117] "RemoveContainer" containerID="045818a78d2ce40f0f78e18ffdb711b694064884780c3807a54d61d9c2c9735d" Jan 22 17:49:34 crc kubenswrapper[5032]: I0122 17:49:34.187390 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57"} Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.830961 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:36 crc kubenswrapper[5032]: E0122 17:49:36.831965 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="extract-utilities" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.831983 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="extract-utilities" Jan 22 17:49:36 crc kubenswrapper[5032]: E0122 17:49:36.832038 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="extract-content" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.832046 5032 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="extract-content" Jan 22 17:49:36 crc kubenswrapper[5032]: E0122 17:49:36.832062 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="registry-server" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.832070 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="registry-server" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.832253 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="63ac7df3-5723-464e-af5d-873703270818" containerName="registry-server" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.834025 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.840983 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.974607 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.974820 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:36 crc kubenswrapper[5032]: I0122 17:49:36.974906 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j9sl\" (UniqueName: \"kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.076259 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.076696 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.076848 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j9sl\" (UniqueName: \"kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.076886 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.077167 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.102279 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j9sl\" (UniqueName: \"kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl\") pod \"redhat-operators-dptvr\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.170112 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:37 crc kubenswrapper[5032]: I0122 17:49:37.643782 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:38 crc kubenswrapper[5032]: I0122 17:49:38.217841 5032 generic.go:334] "Generic (PLEG): container finished" podID="4227669d-b1ba-4dcf-976c-940162e7d253" containerID="687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce" exitCode=0 Jan 22 17:49:38 crc kubenswrapper[5032]: I0122 17:49:38.217909 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerDied","Data":"687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce"} Jan 22 17:49:38 crc kubenswrapper[5032]: I0122 17:49:38.218986 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerStarted","Data":"86a0c16674f85e7303509ab54d50bbff39b0287cc10cc79eccb2a8f56d8e2d4a"} Jan 22 17:49:38 crc kubenswrapper[5032]: I0122 17:49:38.219667 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:49:39 crc kubenswrapper[5032]: I0122 17:49:39.231540 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerStarted","Data":"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3"} Jan 22 17:49:42 crc kubenswrapper[5032]: I0122 17:49:42.270169 5032 generic.go:334] "Generic (PLEG): container finished" podID="4227669d-b1ba-4dcf-976c-940162e7d253" containerID="9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3" exitCode=0 Jan 22 17:49:42 crc kubenswrapper[5032]: I0122 17:49:42.270272 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerDied","Data":"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3"} Jan 22 17:49:43 crc kubenswrapper[5032]: I0122 17:49:43.285600 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" 
event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerStarted","Data":"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d"} Jan 22 17:49:43 crc kubenswrapper[5032]: I0122 17:49:43.308294 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dptvr" podStartSLOduration=2.78286971 podStartE2EDuration="7.308267597s" podCreationTimestamp="2026-01-22 17:49:36 +0000 UTC" firstStartedPulling="2026-01-22 17:49:38.219440077 +0000 UTC m=+6282.031609108" lastFinishedPulling="2026-01-22 17:49:42.744837954 +0000 UTC m=+6286.557006995" observedRunningTime="2026-01-22 17:49:43.305467663 +0000 UTC m=+6287.117636714" watchObservedRunningTime="2026-01-22 17:49:43.308267597 +0000 UTC m=+6287.120436648" Jan 22 17:49:47 crc kubenswrapper[5032]: I0122 17:49:47.171059 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:47 crc kubenswrapper[5032]: I0122 17:49:47.171631 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:48 crc kubenswrapper[5032]: I0122 17:49:48.247104 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dptvr" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="registry-server" probeResult="failure" output=< Jan 22 17:49:48 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 17:49:48 crc kubenswrapper[5032]: > Jan 22 17:49:57 crc kubenswrapper[5032]: I0122 17:49:57.320466 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:57 crc kubenswrapper[5032]: I0122 17:49:57.401571 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:57 crc kubenswrapper[5032]: I0122 17:49:57.565161 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:58 crc kubenswrapper[5032]: I0122 17:49:58.409765 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dptvr" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="registry-server" containerID="cri-o://76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d" gracePeriod=2 Jan 22 17:49:58 crc kubenswrapper[5032]: I0122 17:49:58.892749 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.036491 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content\") pod \"4227669d-b1ba-4dcf-976c-940162e7d253\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.036531 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j9sl\" (UniqueName: \"kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl\") pod \"4227669d-b1ba-4dcf-976c-940162e7d253\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.036736 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities\") pod \"4227669d-b1ba-4dcf-976c-940162e7d253\" (UID: \"4227669d-b1ba-4dcf-976c-940162e7d253\") " Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.037523 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities" (OuterVolumeSpecName: "utilities") pod "4227669d-b1ba-4dcf-976c-940162e7d253" (UID: "4227669d-b1ba-4dcf-976c-940162e7d253"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.042149 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl" (OuterVolumeSpecName: "kube-api-access-5j9sl") pod "4227669d-b1ba-4dcf-976c-940162e7d253" (UID: "4227669d-b1ba-4dcf-976c-940162e7d253"). InnerVolumeSpecName "kube-api-access-5j9sl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.138647 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j9sl\" (UniqueName: \"kubernetes.io/projected/4227669d-b1ba-4dcf-976c-940162e7d253-kube-api-access-5j9sl\") on node \"crc\" DevicePath \"\"" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.138896 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.146695 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4227669d-b1ba-4dcf-976c-940162e7d253" (UID: "4227669d-b1ba-4dcf-976c-940162e7d253"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.240277 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4227669d-b1ba-4dcf-976c-940162e7d253-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.418741 5032 generic.go:334] "Generic (PLEG): container finished" podID="4227669d-b1ba-4dcf-976c-940162e7d253" containerID="76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d" exitCode=0 Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.418789 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerDied","Data":"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d"} Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.418817 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dptvr" event={"ID":"4227669d-b1ba-4dcf-976c-940162e7d253","Type":"ContainerDied","Data":"86a0c16674f85e7303509ab54d50bbff39b0287cc10cc79eccb2a8f56d8e2d4a"} Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.418836 5032 scope.go:117] "RemoveContainer" containerID="76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.418983 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dptvr" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.448481 5032 scope.go:117] "RemoveContainer" containerID="9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.459088 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.468076 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dptvr"] Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.475102 5032 scope.go:117] "RemoveContainer" containerID="687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.508433 5032 scope.go:117] "RemoveContainer" containerID="76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d" Jan 22 17:49:59 crc kubenswrapper[5032]: E0122 17:49:59.508852 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d\": container with ID starting with 76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d not found: ID does not exist" containerID="76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.508955 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d"} err="failed to get container status \"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d\": rpc error: code = NotFound desc = could not find container \"76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d\": container with ID starting with 76a733931af22d62c4495f2ea9e58776626a4315796680bc3fd6f4752c1a070d not found: ID does not exist" Jan 22 17:49:59 crc 
kubenswrapper[5032]: I0122 17:49:59.508974 5032 scope.go:117] "RemoveContainer" containerID="9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3" Jan 22 17:49:59 crc kubenswrapper[5032]: E0122 17:49:59.509702 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3\": container with ID starting with 9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3 not found: ID does not exist" containerID="9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.509736 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3"} err="failed to get container status \"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3\": rpc error: code = NotFound desc = could not find container \"9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3\": container with ID starting with 9366a990f0e3838a1144e2bddc4a4b8f553d670052fcb01bcab654753b7aacb3 not found: ID does not exist" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.509753 5032 scope.go:117] "RemoveContainer" containerID="687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce" Jan 22 17:49:59 crc kubenswrapper[5032]: E0122 17:49:59.509967 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce\": container with ID starting with 687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce not found: ID does not exist" containerID="687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce" Jan 22 17:49:59 crc kubenswrapper[5032]: I0122 17:49:59.509988 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce"} err="failed to get container status \"687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce\": rpc error: code = NotFound desc = could not find container \"687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce\": container with ID starting with 687a5f54a7b0e24ea51661c51b907a1f41fd4442ca9d3e048173367642e1fcce not found: ID does not exist" Jan 22 17:50:00 crc kubenswrapper[5032]: I0122 17:50:00.469464 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" path="/var/lib/kubelet/pods/4227669d-b1ba-4dcf-976c-940162e7d253/volumes" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.946079 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:11 crc kubenswrapper[5032]: E0122 17:50:11.947419 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="extract-utilities" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.947443 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="extract-utilities" Jan 22 17:50:11 crc kubenswrapper[5032]: E0122 17:50:11.947475 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="extract-content" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.947489 5032 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="extract-content" Jan 22 17:50:11 crc kubenswrapper[5032]: E0122 17:50:11.947540 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="registry-server" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.947551 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="registry-server" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.947854 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="4227669d-b1ba-4dcf-976c-940162e7d253" containerName="registry-server" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.964884 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:11 crc kubenswrapper[5032]: I0122 17:50:11.987703 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.076157 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b96k\" (UniqueName: \"kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.076218 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.076300 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.178532 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b96k\" (UniqueName: \"kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.178589 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.178666 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc 
kubenswrapper[5032]: I0122 17:50:12.179261 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.179305 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.206569 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b96k\" (UniqueName: \"kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k\") pod \"redhat-marketplace-x8ckd\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.294624 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:12 crc kubenswrapper[5032]: I0122 17:50:12.777310 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:13 crc kubenswrapper[5032]: I0122 17:50:13.569748 5032 generic.go:334] "Generic (PLEG): container finished" podID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerID="e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef" exitCode=0 Jan 22 17:50:13 crc kubenswrapper[5032]: I0122 17:50:13.570117 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerDied","Data":"e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef"} Jan 22 17:50:13 crc kubenswrapper[5032]: I0122 17:50:13.570153 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerStarted","Data":"e601eb71b698aa1582286a58e8723c4262d73d5b3104f6647c2321d03f8efea6"} Jan 22 17:50:15 crc kubenswrapper[5032]: I0122 17:50:15.593095 5032 generic.go:334] "Generic (PLEG): container finished" podID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerID="02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec" exitCode=0 Jan 22 17:50:15 crc kubenswrapper[5032]: I0122 17:50:15.593530 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerDied","Data":"02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec"} Jan 22 17:50:16 crc kubenswrapper[5032]: I0122 17:50:16.604411 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerStarted","Data":"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708"} Jan 22 17:50:16 crc kubenswrapper[5032]: I0122 17:50:16.626756 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x8ckd" podStartSLOduration=2.837112857 
podStartE2EDuration="5.626735972s" podCreationTimestamp="2026-01-22 17:50:11 +0000 UTC" firstStartedPulling="2026-01-22 17:50:13.57226868 +0000 UTC m=+6317.384437711" lastFinishedPulling="2026-01-22 17:50:16.361891795 +0000 UTC m=+6320.174060826" observedRunningTime="2026-01-22 17:50:16.620886486 +0000 UTC m=+6320.433055537" watchObservedRunningTime="2026-01-22 17:50:16.626735972 +0000 UTC m=+6320.438905003" Jan 22 17:50:22 crc kubenswrapper[5032]: I0122 17:50:22.295265 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:22 crc kubenswrapper[5032]: I0122 17:50:22.295743 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:22 crc kubenswrapper[5032]: I0122 17:50:22.359503 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:22 crc kubenswrapper[5032]: I0122 17:50:22.701073 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:22 crc kubenswrapper[5032]: I0122 17:50:22.755389 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:24 crc kubenswrapper[5032]: I0122 17:50:24.679756 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x8ckd" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="registry-server" containerID="cri-o://9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708" gracePeriod=2 Jan 22 17:50:25 crc kubenswrapper[5032]: E0122 17:50:24.996756 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95a2a970_1e77_4f1a_86f8_cba26e3218ff.slice/crio-conmon-9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95a2a970_1e77_4f1a_86f8_cba26e3218ff.slice/crio-9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708.scope\": RecentStats: unable to find data in memory cache]" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.242786 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.321749 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content\") pod \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.321930 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities\") pod \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.322017 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b96k\" (UniqueName: \"kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k\") pod \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\" (UID: \"95a2a970-1e77-4f1a-86f8-cba26e3218ff\") " Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.323023 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities" (OuterVolumeSpecName: "utilities") pod "95a2a970-1e77-4f1a-86f8-cba26e3218ff" (UID: "95a2a970-1e77-4f1a-86f8-cba26e3218ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.333070 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k" (OuterVolumeSpecName: "kube-api-access-7b96k") pod "95a2a970-1e77-4f1a-86f8-cba26e3218ff" (UID: "95a2a970-1e77-4f1a-86f8-cba26e3218ff"). InnerVolumeSpecName "kube-api-access-7b96k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.350338 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95a2a970-1e77-4f1a-86f8-cba26e3218ff" (UID: "95a2a970-1e77-4f1a-86f8-cba26e3218ff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.423742 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.423783 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95a2a970-1e77-4f1a-86f8-cba26e3218ff-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.423796 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b96k\" (UniqueName: \"kubernetes.io/projected/95a2a970-1e77-4f1a-86f8-cba26e3218ff-kube-api-access-7b96k\") on node \"crc\" DevicePath \"\"" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.690667 5032 generic.go:334] "Generic (PLEG): container finished" podID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerID="9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708" exitCode=0 Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.690714 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerDied","Data":"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708"} Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.690745 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x8ckd" event={"ID":"95a2a970-1e77-4f1a-86f8-cba26e3218ff","Type":"ContainerDied","Data":"e601eb71b698aa1582286a58e8723c4262d73d5b3104f6647c2321d03f8efea6"} Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.690742 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x8ckd" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.690763 5032 scope.go:117] "RemoveContainer" containerID="9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.715591 5032 scope.go:117] "RemoveContainer" containerID="02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.743233 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.750049 5032 scope.go:117] "RemoveContainer" containerID="e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.753441 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x8ckd"] Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.804918 5032 scope.go:117] "RemoveContainer" containerID="9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708" Jan 22 17:50:25 crc kubenswrapper[5032]: E0122 17:50:25.805481 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708\": container with ID starting with 9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708 not found: ID does not exist" containerID="9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.805555 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708"} err="failed to get container status \"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708\": rpc error: code = NotFound desc = could not find container \"9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708\": container with ID starting with 9c14f7d1c0c6f9f984cf2bdf76be403c3f68944a669e944b801b8a2e37870708 not found: ID does not exist" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.805597 5032 scope.go:117] "RemoveContainer" containerID="02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec" Jan 22 17:50:25 crc kubenswrapper[5032]: E0122 17:50:25.806204 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec\": container with ID starting with 02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec not found: ID does not exist" containerID="02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.806242 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec"} err="failed to get container status \"02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec\": rpc error: code = NotFound desc = could not find container \"02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec\": container with ID starting with 02f720b833f456c7c6db4422735b7e3b0577140eb7b28cb7527c9f8c017a37ec not found: ID does not exist" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.806263 5032 scope.go:117] "RemoveContainer" 
containerID="e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef" Jan 22 17:50:25 crc kubenswrapper[5032]: E0122 17:50:25.806646 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef\": container with ID starting with e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef not found: ID does not exist" containerID="e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef" Jan 22 17:50:25 crc kubenswrapper[5032]: I0122 17:50:25.806688 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef"} err="failed to get container status \"e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef\": rpc error: code = NotFound desc = could not find container \"e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef\": container with ID starting with e64900d15ca3a35180e2e459367b51f368910904261bbe288c5d7bd0a9ea97ef not found: ID does not exist" Jan 22 17:50:26 crc kubenswrapper[5032]: I0122 17:50:26.465161 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" path="/var/lib/kubelet/pods/95a2a970-1e77-4f1a-86f8-cba26e3218ff/volumes" Jan 22 17:51:33 crc kubenswrapper[5032]: I0122 17:51:33.017225 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:51:33 crc kubenswrapper[5032]: I0122 17:51:33.017722 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:52:03 crc kubenswrapper[5032]: I0122 17:52:03.017229 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:52:03 crc kubenswrapper[5032]: I0122 17:52:03.017839 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.017355 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.017970 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.018014 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.018687 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.018741 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" gracePeriod=600 Jan 22 17:52:33 crc kubenswrapper[5032]: E0122 17:52:33.136987 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.868779 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" exitCode=0 Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.868827 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57"} Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.868884 5032 scope.go:117] "RemoveContainer" containerID="722f347463ab474d0fb60065c7a7bdf76e6459e6bd256a877d3be60b771c86f7" Jan 22 17:52:33 crc kubenswrapper[5032]: I0122 17:52:33.869469 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:52:33 crc kubenswrapper[5032]: E0122 17:52:33.870134 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:52:44 crc kubenswrapper[5032]: I0122 17:52:44.455025 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:52:44 crc kubenswrapper[5032]: E0122 17:52:44.455951 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:52:58 crc kubenswrapper[5032]: I0122 17:52:58.454505 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:52:58 crc kubenswrapper[5032]: E0122 17:52:58.455286 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:53:13 crc kubenswrapper[5032]: I0122 17:53:13.455073 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:53:13 crc kubenswrapper[5032]: E0122 17:53:13.456532 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:53:28 crc kubenswrapper[5032]: I0122 17:53:28.455549 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:53:28 crc kubenswrapper[5032]: E0122 17:53:28.456848 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:53:41 crc kubenswrapper[5032]: I0122 17:53:41.455453 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:53:41 crc kubenswrapper[5032]: E0122 17:53:41.456219 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:53:56 crc kubenswrapper[5032]: I0122 17:53:56.461681 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:53:56 crc kubenswrapper[5032]: E0122 17:53:56.462580 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" 
podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:54:11 crc kubenswrapper[5032]: I0122 17:54:11.454506 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:54:11 crc kubenswrapper[5032]: E0122 17:54:11.455352 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:54:23 crc kubenswrapper[5032]: I0122 17:54:23.455054 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:54:23 crc kubenswrapper[5032]: E0122 17:54:23.455877 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.792979 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:32 crc kubenswrapper[5032]: E0122 17:54:32.793965 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="extract-utilities" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.794054 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="extract-utilities" Jan 22 17:54:32 crc kubenswrapper[5032]: E0122 17:54:32.794074 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="registry-server" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.794080 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="registry-server" Jan 22 17:54:32 crc kubenswrapper[5032]: E0122 17:54:32.794090 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="extract-content" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.794097 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="extract-content" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.794277 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="95a2a970-1e77-4f1a-86f8-cba26e3218ff" containerName="registry-server" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.795908 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.812200 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.850678 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9cxm\" (UniqueName: \"kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.850750 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.850831 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.953228 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9cxm\" (UniqueName: \"kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.953285 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.953328 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.953799 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.953814 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:32 crc kubenswrapper[5032]: I0122 17:54:32.987807 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p9cxm\" (UniqueName: \"kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm\") pod \"community-operators-fn7m7\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:33 crc kubenswrapper[5032]: I0122 17:54:33.122444 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:33 crc kubenswrapper[5032]: I0122 17:54:33.672920 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:33 crc kubenswrapper[5032]: I0122 17:54:33.952055 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerStarted","Data":"f47b778444a1664390f8e46293eb7624fc871ea02b47127fb0a453bf0e6c538e"} Jan 22 17:54:34 crc kubenswrapper[5032]: I0122 17:54:34.963961 5032 generic.go:334] "Generic (PLEG): container finished" podID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerID="47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41" exitCode=0 Jan 22 17:54:34 crc kubenswrapper[5032]: I0122 17:54:34.964018 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerDied","Data":"47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41"} Jan 22 17:54:35 crc kubenswrapper[5032]: I0122 17:54:35.976769 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerStarted","Data":"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7"} Jan 22 17:54:36 crc kubenswrapper[5032]: I0122 17:54:36.989036 5032 generic.go:334] "Generic (PLEG): container finished" podID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerID="d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7" exitCode=0 Jan 22 17:54:36 crc kubenswrapper[5032]: I0122 17:54:36.989098 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerDied","Data":"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7"} Jan 22 17:54:38 crc kubenswrapper[5032]: I0122 17:54:37.999975 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerStarted","Data":"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189"} Jan 22 17:54:38 crc kubenswrapper[5032]: I0122 17:54:38.051230 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fn7m7" podStartSLOduration=3.621720973 podStartE2EDuration="6.0511221s" podCreationTimestamp="2026-01-22 17:54:32 +0000 UTC" firstStartedPulling="2026-01-22 17:54:34.966540445 +0000 UTC m=+6578.778709476" lastFinishedPulling="2026-01-22 17:54:37.395941572 +0000 UTC m=+6581.208110603" observedRunningTime="2026-01-22 17:54:38.020096233 +0000 UTC m=+6581.832265274" watchObservedRunningTime="2026-01-22 17:54:38.0511221 +0000 UTC m=+6581.863291141" Jan 22 17:54:38 crc kubenswrapper[5032]: I0122 17:54:38.455461 5032 scope.go:117] "RemoveContainer" 
containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:54:38 crc kubenswrapper[5032]: E0122 17:54:38.455807 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:54:43 crc kubenswrapper[5032]: I0122 17:54:43.123950 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:43 crc kubenswrapper[5032]: I0122 17:54:43.124595 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:43 crc kubenswrapper[5032]: I0122 17:54:43.181531 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:44 crc kubenswrapper[5032]: I0122 17:54:44.100321 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:44 crc kubenswrapper[5032]: I0122 17:54:44.156537 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.075070 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fn7m7" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="registry-server" containerID="cri-o://8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189" gracePeriod=2 Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.550477 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.625450 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content\") pod \"cbea77c3-ba2e-41de-9746-bf65848a0137\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.625522 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9cxm\" (UniqueName: \"kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm\") pod \"cbea77c3-ba2e-41de-9746-bf65848a0137\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.625638 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities\") pod \"cbea77c3-ba2e-41de-9746-bf65848a0137\" (UID: \"cbea77c3-ba2e-41de-9746-bf65848a0137\") " Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.626978 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities" (OuterVolumeSpecName: "utilities") pod "cbea77c3-ba2e-41de-9746-bf65848a0137" (UID: "cbea77c3-ba2e-41de-9746-bf65848a0137"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.633298 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm" (OuterVolumeSpecName: "kube-api-access-p9cxm") pod "cbea77c3-ba2e-41de-9746-bf65848a0137" (UID: "cbea77c3-ba2e-41de-9746-bf65848a0137"). InnerVolumeSpecName "kube-api-access-p9cxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.729366 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9cxm\" (UniqueName: \"kubernetes.io/projected/cbea77c3-ba2e-41de-9746-bf65848a0137-kube-api-access-p9cxm\") on node \"crc\" DevicePath \"\"" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.729462 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.825463 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cbea77c3-ba2e-41de-9746-bf65848a0137" (UID: "cbea77c3-ba2e-41de-9746-bf65848a0137"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:54:46 crc kubenswrapper[5032]: I0122 17:54:46.832000 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbea77c3-ba2e-41de-9746-bf65848a0137-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.086002 5032 generic.go:334] "Generic (PLEG): container finished" podID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerID="8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189" exitCode=0 Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.086052 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerDied","Data":"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189"} Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.086090 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fn7m7" event={"ID":"cbea77c3-ba2e-41de-9746-bf65848a0137","Type":"ContainerDied","Data":"f47b778444a1664390f8e46293eb7624fc871ea02b47127fb0a453bf0e6c538e"} Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.086117 5032 scope.go:117] "RemoveContainer" containerID="8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.086144 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fn7m7" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.105234 5032 scope.go:117] "RemoveContainer" containerID="d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.126790 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.133082 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fn7m7"] Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.146983 5032 scope.go:117] "RemoveContainer" containerID="47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.186742 5032 scope.go:117] "RemoveContainer" containerID="8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189" Jan 22 17:54:47 crc kubenswrapper[5032]: E0122 17:54:47.187444 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189\": container with ID starting with 8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189 not found: ID does not exist" containerID="8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.187492 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189"} err="failed to get container status \"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189\": rpc error: code = NotFound desc = could not find container \"8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189\": container with ID starting with 8aad81bca53f39c34a0bde83f49bab245b22f34717a7d95ed2dc7895dbba2189 not found: ID does not exist" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.187513 5032 scope.go:117] "RemoveContainer" containerID="d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7" Jan 22 17:54:47 crc kubenswrapper[5032]: E0122 17:54:47.188329 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7\": container with ID starting with d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7 not found: ID does not exist" containerID="d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.188381 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7"} err="failed to get container status \"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7\": rpc error: code = NotFound desc = could not find container \"d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7\": container with ID starting with d28a3d78358395bdecac535f3ab7e8978fabf7064bdb95d2122354a869f291a7 not found: ID does not exist" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.188399 5032 scope.go:117] "RemoveContainer" containerID="47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41" Jan 22 17:54:47 crc kubenswrapper[5032]: E0122 17:54:47.188941 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41\": container with ID starting with 47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41 not found: ID does not exist" containerID="47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41" Jan 22 17:54:47 crc kubenswrapper[5032]: I0122 17:54:47.188988 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41"} err="failed to get container status \"47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41\": rpc error: code = NotFound desc = could not find container \"47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41\": container with ID starting with 47a1b4e9d75b864afc47c93496b0688624bc096600b2193712ba3e29d44bbf41 not found: ID does not exist" Jan 22 17:54:48 crc kubenswrapper[5032]: I0122 17:54:48.471717 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" path="/var/lib/kubelet/pods/cbea77c3-ba2e-41de-9746-bf65848a0137/volumes" Jan 22 17:54:50 crc kubenswrapper[5032]: I0122 17:54:50.454726 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:54:50 crc kubenswrapper[5032]: E0122 17:54:50.455152 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:55:01 crc kubenswrapper[5032]: I0122 17:55:01.455088 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:55:01 crc kubenswrapper[5032]: E0122 17:55:01.456016 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:55:12 crc kubenswrapper[5032]: I0122 17:55:12.455588 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:55:12 crc kubenswrapper[5032]: E0122 17:55:12.456490 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:55:26 crc kubenswrapper[5032]: I0122 17:55:26.460111 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:55:26 crc kubenswrapper[5032]: E0122 17:55:26.460992 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:55:40 crc kubenswrapper[5032]: I0122 17:55:40.456772 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:55:40 crc kubenswrapper[5032]: E0122 17:55:40.457736 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:55:52 crc kubenswrapper[5032]: I0122 17:55:52.455037 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:55:52 crc kubenswrapper[5032]: E0122 17:55:52.456129 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:04 crc kubenswrapper[5032]: I0122 17:56:04.455323 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:56:04 crc kubenswrapper[5032]: E0122 17:56:04.457617 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:18 crc kubenswrapper[5032]: I0122 17:56:18.454785 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:56:18 crc kubenswrapper[5032]: E0122 17:56:18.455537 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:33 crc kubenswrapper[5032]: I0122 17:56:33.454808 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:56:33 crc kubenswrapper[5032]: E0122 17:56:33.455798 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:44 crc kubenswrapper[5032]: I0122 17:56:44.455623 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:56:44 crc kubenswrapper[5032]: E0122 17:56:44.456551 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.312045 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:56:46 crc kubenswrapper[5032]: E0122 17:56:46.312937 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="registry-server" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.312955 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="registry-server" Jan 22 17:56:46 crc kubenswrapper[5032]: E0122 17:56:46.312975 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="extract-utilities" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.312984 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="extract-utilities" Jan 22 17:56:46 crc kubenswrapper[5032]: E0122 17:56:46.313031 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="extract-content" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.313042 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="extract-content" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.313313 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbea77c3-ba2e-41de-9746-bf65848a0137" containerName="registry-server" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.315004 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.353976 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.375911 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkdrq\" (UniqueName: \"kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.376221 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.376266 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.478429 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkdrq\" (UniqueName: \"kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.478494 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.478552 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.478993 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.479027 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.509354 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vkdrq\" (UniqueName: \"kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq\") pod \"certified-operators-mw92p\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:46 crc kubenswrapper[5032]: I0122 17:56:46.646613 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:47 crc kubenswrapper[5032]: I0122 17:56:47.199512 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:56:48 crc kubenswrapper[5032]: I0122 17:56:48.167978 5032 generic.go:334] "Generic (PLEG): container finished" podID="245f89b8-8254-4b90-a717-f8ed4414133e" containerID="5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a" exitCode=0 Jan 22 17:56:48 crc kubenswrapper[5032]: I0122 17:56:48.168046 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerDied","Data":"5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a"} Jan 22 17:56:48 crc kubenswrapper[5032]: I0122 17:56:48.168262 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerStarted","Data":"08fd14084ae004474dd6a1a0385d088025f7b39d8f809fc10dd5dcac6244eedc"} Jan 22 17:56:48 crc kubenswrapper[5032]: I0122 17:56:48.170367 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 17:56:49 crc kubenswrapper[5032]: I0122 17:56:49.180323 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerStarted","Data":"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0"} Jan 22 17:56:50 crc kubenswrapper[5032]: I0122 17:56:50.194164 5032 generic.go:334] "Generic (PLEG): container finished" podID="245f89b8-8254-4b90-a717-f8ed4414133e" containerID="7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0" exitCode=0 Jan 22 17:56:50 crc kubenswrapper[5032]: I0122 17:56:50.194260 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerDied","Data":"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0"} Jan 22 17:56:51 crc kubenswrapper[5032]: I0122 17:56:51.207805 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerStarted","Data":"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b"} Jan 22 17:56:51 crc kubenswrapper[5032]: I0122 17:56:51.229536 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mw92p" podStartSLOduration=2.522755251 podStartE2EDuration="5.229519896s" podCreationTimestamp="2026-01-22 17:56:46 +0000 UTC" firstStartedPulling="2026-01-22 17:56:48.169938029 +0000 UTC m=+6711.982107070" lastFinishedPulling="2026-01-22 17:56:50.876702684 +0000 UTC m=+6714.688871715" observedRunningTime="2026-01-22 17:56:51.227959404 +0000 UTC m=+6715.040128445" watchObservedRunningTime="2026-01-22 
17:56:51.229519896 +0000 UTC m=+6715.041688917" Jan 22 17:56:56 crc kubenswrapper[5032]: I0122 17:56:56.647009 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:56 crc kubenswrapper[5032]: I0122 17:56:56.647611 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:56 crc kubenswrapper[5032]: I0122 17:56:56.721123 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:57 crc kubenswrapper[5032]: I0122 17:56:57.308583 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:57 crc kubenswrapper[5032]: I0122 17:56:57.355251 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.286301 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mw92p" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="registry-server" containerID="cri-o://867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b" gracePeriod=2 Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.454789 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:56:59 crc kubenswrapper[5032]: E0122 17:56:59.455186 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.735966 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.851751 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkdrq\" (UniqueName: \"kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq\") pod \"245f89b8-8254-4b90-a717-f8ed4414133e\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.851825 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities\") pod \"245f89b8-8254-4b90-a717-f8ed4414133e\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.851946 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content\") pod \"245f89b8-8254-4b90-a717-f8ed4414133e\" (UID: \"245f89b8-8254-4b90-a717-f8ed4414133e\") " Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.852629 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities" (OuterVolumeSpecName: "utilities") pod "245f89b8-8254-4b90-a717-f8ed4414133e" (UID: "245f89b8-8254-4b90-a717-f8ed4414133e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.861087 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq" (OuterVolumeSpecName: "kube-api-access-vkdrq") pod "245f89b8-8254-4b90-a717-f8ed4414133e" (UID: "245f89b8-8254-4b90-a717-f8ed4414133e"). InnerVolumeSpecName "kube-api-access-vkdrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.899034 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "245f89b8-8254-4b90-a717-f8ed4414133e" (UID: "245f89b8-8254-4b90-a717-f8ed4414133e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.953957 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkdrq\" (UniqueName: \"kubernetes.io/projected/245f89b8-8254-4b90-a717-f8ed4414133e-kube-api-access-vkdrq\") on node \"crc\" DevicePath \"\"" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.954209 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 17:56:59 crc kubenswrapper[5032]: I0122 17:56:59.954280 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245f89b8-8254-4b90-a717-f8ed4414133e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.296169 5032 generic.go:334] "Generic (PLEG): container finished" podID="245f89b8-8254-4b90-a717-f8ed4414133e" containerID="867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b" exitCode=0 Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.296230 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mw92p" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.296233 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerDied","Data":"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b"} Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.296664 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mw92p" event={"ID":"245f89b8-8254-4b90-a717-f8ed4414133e","Type":"ContainerDied","Data":"08fd14084ae004474dd6a1a0385d088025f7b39d8f809fc10dd5dcac6244eedc"} Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.296694 5032 scope.go:117] "RemoveContainer" containerID="867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.314029 5032 scope.go:117] "RemoveContainer" containerID="7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.335798 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.344478 5032 scope.go:117] "RemoveContainer" containerID="5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.346914 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mw92p"] Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.381264 5032 scope.go:117] "RemoveContainer" containerID="867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b" Jan 22 17:57:00 crc kubenswrapper[5032]: E0122 17:57:00.381692 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b\": container with ID starting with 867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b not found: ID does not exist" containerID="867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.381754 
5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b"} err="failed to get container status \"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b\": rpc error: code = NotFound desc = could not find container \"867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b\": container with ID starting with 867cbbad1609aecc28daab0e6473e905cf4cb773daad54863c8581ee03768b6b not found: ID does not exist" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.381795 5032 scope.go:117] "RemoveContainer" containerID="7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0" Jan 22 17:57:00 crc kubenswrapper[5032]: E0122 17:57:00.382361 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0\": container with ID starting with 7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0 not found: ID does not exist" containerID="7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.382402 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0"} err="failed to get container status \"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0\": rpc error: code = NotFound desc = could not find container \"7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0\": container with ID starting with 7dc5efda5c1c04c0f6f8ee9c58438885c110eccab2014f9c20df9ea6e79831b0 not found: ID does not exist" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.382429 5032 scope.go:117] "RemoveContainer" containerID="5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a" Jan 22 17:57:00 crc kubenswrapper[5032]: E0122 17:57:00.383100 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a\": container with ID starting with 5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a not found: ID does not exist" containerID="5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.383143 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a"} err="failed to get container status \"5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a\": rpc error: code = NotFound desc = could not find container \"5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a\": container with ID starting with 5a384d4ebc48a85e783f0c12845033ddedc87aa1b72215e81ebf4a875d93850a not found: ID does not exist" Jan 22 17:57:00 crc kubenswrapper[5032]: I0122 17:57:00.469098 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" path="/var/lib/kubelet/pods/245f89b8-8254-4b90-a717-f8ed4414133e/volumes" Jan 22 17:57:10 crc kubenswrapper[5032]: I0122 17:57:10.454532 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:57:10 crc kubenswrapper[5032]: E0122 17:57:10.455195 5032 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:57:22 crc kubenswrapper[5032]: I0122 17:57:22.455420 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:57:22 crc kubenswrapper[5032]: E0122 17:57:22.456254 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 17:57:34 crc kubenswrapper[5032]: I0122 17:57:34.456068 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 17:57:34 crc kubenswrapper[5032]: I0122 17:57:34.863068 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477"} Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.142365 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2"] Jan 22 18:00:00 crc kubenswrapper[5032]: E0122 18:00:00.143212 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="extract-content" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.143225 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="extract-content" Jan 22 18:00:00 crc kubenswrapper[5032]: E0122 18:00:00.143240 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="registry-server" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.143246 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="registry-server" Jan 22 18:00:00 crc kubenswrapper[5032]: E0122 18:00:00.143256 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="extract-utilities" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.143263 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="extract-utilities" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.143453 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="245f89b8-8254-4b90-a717-f8ed4414133e" containerName="registry-server" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.144024 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.152643 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.152743 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.164912 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2"] Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.231743 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.231786 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkjvq\" (UniqueName: \"kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.231945 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.333567 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.333938 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkjvq\" (UniqueName: \"kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.333966 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.335751 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume\") pod 
\"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.341834 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.355088 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkjvq\" (UniqueName: \"kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq\") pod \"collect-profiles-29485080-fm7z2\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.470592 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:00 crc kubenswrapper[5032]: I0122 18:00:00.918137 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2"] Jan 22 18:00:01 crc kubenswrapper[5032]: I0122 18:00:01.211266 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" event={"ID":"b6e9a72b-0c2f-47ee-96cd-93a350ae6720","Type":"ContainerStarted","Data":"9bbb7f482531d7af1c606e18945a5d873c37b4521bc7cad7f28781d4d54cd8ea"} Jan 22 18:00:01 crc kubenswrapper[5032]: I0122 18:00:01.211575 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" event={"ID":"b6e9a72b-0c2f-47ee-96cd-93a350ae6720","Type":"ContainerStarted","Data":"9ac744871f3fb8e825cf131b872cbdb0fb01b6463dd97cd5b56d3dc4a74f74b9"} Jan 22 18:00:01 crc kubenswrapper[5032]: I0122 18:00:01.257732 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" podStartSLOduration=1.2577107889999999 podStartE2EDuration="1.257710789s" podCreationTimestamp="2026-01-22 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 18:00:01.234614054 +0000 UTC m=+6905.046783105" watchObservedRunningTime="2026-01-22 18:00:01.257710789 +0000 UTC m=+6905.069879820" Jan 22 18:00:02 crc kubenswrapper[5032]: I0122 18:00:02.268711 5032 generic.go:334] "Generic (PLEG): container finished" podID="b6e9a72b-0c2f-47ee-96cd-93a350ae6720" containerID="9bbb7f482531d7af1c606e18945a5d873c37b4521bc7cad7f28781d4d54cd8ea" exitCode=0 Jan 22 18:00:02 crc kubenswrapper[5032]: I0122 18:00:02.269101 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" event={"ID":"b6e9a72b-0c2f-47ee-96cd-93a350ae6720","Type":"ContainerDied","Data":"9bbb7f482531d7af1c606e18945a5d873c37b4521bc7cad7f28781d4d54cd8ea"} Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.016706 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.016779 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.727346 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.818562 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume\") pod \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.818680 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkjvq\" (UniqueName: \"kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq\") pod \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.818850 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume\") pod \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\" (UID: \"b6e9a72b-0c2f-47ee-96cd-93a350ae6720\") " Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.819357 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume" (OuterVolumeSpecName: "config-volume") pod "b6e9a72b-0c2f-47ee-96cd-93a350ae6720" (UID: "b6e9a72b-0c2f-47ee-96cd-93a350ae6720"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.819778 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.826279 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq" (OuterVolumeSpecName: "kube-api-access-qkjvq") pod "b6e9a72b-0c2f-47ee-96cd-93a350ae6720" (UID: "b6e9a72b-0c2f-47ee-96cd-93a350ae6720"). InnerVolumeSpecName "kube-api-access-qkjvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.826402 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b6e9a72b-0c2f-47ee-96cd-93a350ae6720" (UID: "b6e9a72b-0c2f-47ee-96cd-93a350ae6720"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.921179 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:03 crc kubenswrapper[5032]: I0122 18:00:03.921220 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkjvq\" (UniqueName: \"kubernetes.io/projected/b6e9a72b-0c2f-47ee-96cd-93a350ae6720-kube-api-access-qkjvq\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.291539 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" event={"ID":"b6e9a72b-0c2f-47ee-96cd-93a350ae6720","Type":"ContainerDied","Data":"9ac744871f3fb8e825cf131b872cbdb0fb01b6463dd97cd5b56d3dc4a74f74b9"} Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.291592 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ac744871f3fb8e825cf131b872cbdb0fb01b6463dd97cd5b56d3dc4a74f74b9" Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.291668 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485080-fm7z2" Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.322320 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn"] Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.330251 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485035-tk4dn"] Jan 22 18:00:04 crc kubenswrapper[5032]: I0122 18:00:04.465715 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6810aad1-56b6-42d7-a0b6-ea8757b6e8dd" path="/var/lib/kubelet/pods/6810aad1-56b6-42d7-a0b6-ea8757b6e8dd/volumes" Jan 22 18:00:21 crc kubenswrapper[5032]: I0122 18:00:21.889324 5032 scope.go:117] "RemoveContainer" containerID="4ce52aa1461ddf7871bef8a460035fafb21baa9e287ed3b799902e314584a2d8" Jan 22 18:00:33 crc kubenswrapper[5032]: I0122 18:00:33.016461 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:00:33 crc kubenswrapper[5032]: I0122 18:00:33.017101 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.209223 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:34 crc kubenswrapper[5032]: E0122 18:00:34.209921 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e9a72b-0c2f-47ee-96cd-93a350ae6720" containerName="collect-profiles" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.209943 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e9a72b-0c2f-47ee-96cd-93a350ae6720" containerName="collect-profiles" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 
18:00:34.210368 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6e9a72b-0c2f-47ee-96cd-93a350ae6720" containerName="collect-profiles" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.212370 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.272023 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.313848 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.314104 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdhx7\" (UniqueName: \"kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.314937 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.416882 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.416955 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdhx7\" (UniqueName: \"kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.417039 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.417505 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.417537 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities\") pod 
\"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.438556 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdhx7\" (UniqueName: \"kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7\") pod \"redhat-operators-ncgj8\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:34 crc kubenswrapper[5032]: I0122 18:00:34.537945 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:35 crc kubenswrapper[5032]: I0122 18:00:34.992536 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:35 crc kubenswrapper[5032]: I0122 18:00:35.549289 5032 generic.go:334] "Generic (PLEG): container finished" podID="476fed42-a934-4906-867f-852254c463a1" containerID="4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7" exitCode=0 Jan 22 18:00:35 crc kubenswrapper[5032]: I0122 18:00:35.549373 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerDied","Data":"4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7"} Jan 22 18:00:35 crc kubenswrapper[5032]: I0122 18:00:35.549619 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerStarted","Data":"d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c"} Jan 22 18:00:36 crc kubenswrapper[5032]: I0122 18:00:36.559415 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerStarted","Data":"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc"} Jan 22 18:00:39 crc kubenswrapper[5032]: I0122 18:00:39.590073 5032 generic.go:334] "Generic (PLEG): container finished" podID="476fed42-a934-4906-867f-852254c463a1" containerID="870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc" exitCode=0 Jan 22 18:00:39 crc kubenswrapper[5032]: I0122 18:00:39.590224 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerDied","Data":"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc"} Jan 22 18:00:40 crc kubenswrapper[5032]: I0122 18:00:40.602789 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerStarted","Data":"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598"} Jan 22 18:00:40 crc kubenswrapper[5032]: I0122 18:00:40.629493 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ncgj8" podStartSLOduration=2.142226816 podStartE2EDuration="6.629464065s" podCreationTimestamp="2026-01-22 18:00:34 +0000 UTC" firstStartedPulling="2026-01-22 18:00:35.55431601 +0000 UTC m=+6939.366485071" lastFinishedPulling="2026-01-22 18:00:40.041553299 +0000 UTC m=+6943.853722320" observedRunningTime="2026-01-22 18:00:40.626424864 +0000 UTC m=+6944.438593975" 
watchObservedRunningTime="2026-01-22 18:00:40.629464065 +0000 UTC m=+6944.441633136" Jan 22 18:00:44 crc kubenswrapper[5032]: I0122 18:00:44.538310 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:44 crc kubenswrapper[5032]: I0122 18:00:44.538795 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:45 crc kubenswrapper[5032]: I0122 18:00:45.585490 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ncgj8" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="registry-server" probeResult="failure" output=< Jan 22 18:00:45 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 18:00:45 crc kubenswrapper[5032]: > Jan 22 18:00:54 crc kubenswrapper[5032]: I0122 18:00:54.599505 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:54 crc kubenswrapper[5032]: I0122 18:00:54.654258 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:54 crc kubenswrapper[5032]: I0122 18:00:54.837219 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:55 crc kubenswrapper[5032]: I0122 18:00:55.728108 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ncgj8" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="registry-server" containerID="cri-o://0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598" gracePeriod=2 Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.247328 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.353976 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities\") pod \"476fed42-a934-4906-867f-852254c463a1\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.354308 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content\") pod \"476fed42-a934-4906-867f-852254c463a1\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.354362 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdhx7\" (UniqueName: \"kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7\") pod \"476fed42-a934-4906-867f-852254c463a1\" (UID: \"476fed42-a934-4906-867f-852254c463a1\") " Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.355042 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities" (OuterVolumeSpecName: "utilities") pod "476fed42-a934-4906-867f-852254c463a1" (UID: "476fed42-a934-4906-867f-852254c463a1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.360836 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7" (OuterVolumeSpecName: "kube-api-access-sdhx7") pod "476fed42-a934-4906-867f-852254c463a1" (UID: "476fed42-a934-4906-867f-852254c463a1"). InnerVolumeSpecName "kube-api-access-sdhx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.456378 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdhx7\" (UniqueName: \"kubernetes.io/projected/476fed42-a934-4906-867f-852254c463a1-kube-api-access-sdhx7\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.456416 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.466443 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "476fed42-a934-4906-867f-852254c463a1" (UID: "476fed42-a934-4906-867f-852254c463a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.558531 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/476fed42-a934-4906-867f-852254c463a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.740011 5032 generic.go:334] "Generic (PLEG): container finished" podID="476fed42-a934-4906-867f-852254c463a1" containerID="0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598" exitCode=0 Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.740075 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerDied","Data":"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598"} Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.740096 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ncgj8" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.740115 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncgj8" event={"ID":"476fed42-a934-4906-867f-852254c463a1","Type":"ContainerDied","Data":"d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c"} Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.740145 5032 scope.go:117] "RemoveContainer" containerID="0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.771119 5032 scope.go:117] "RemoveContainer" containerID="870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.775305 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.786109 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ncgj8"] Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.798125 5032 scope.go:117] "RemoveContainer" containerID="4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.856316 5032 scope.go:117] "RemoveContainer" containerID="0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598" Jan 22 18:00:56 crc kubenswrapper[5032]: E0122 18:00:56.856699 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598\": container with ID starting with 0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598 not found: ID does not exist" containerID="0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.856734 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598"} err="failed to get container status \"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598\": rpc error: code = NotFound desc = could not find container \"0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598\": container with ID starting with 0f03e4b5e79d7f498b817db80a9837930440b9db95c39ea6bc769da5d4b7d598 not found: ID does not exist" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.856755 5032 scope.go:117] "RemoveContainer" containerID="870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc" Jan 22 18:00:56 crc kubenswrapper[5032]: E0122 18:00:56.857353 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc\": container with ID starting with 870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc not found: ID does not exist" containerID="870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.857382 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc"} err="failed to get container status \"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc\": rpc error: code = NotFound desc = could not find container 
\"870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc\": container with ID starting with 870d00fd6db5275eeaddb2420ff7148b37edf6043a9f111569c5263676e7eebc not found: ID does not exist" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.857402 5032 scope.go:117] "RemoveContainer" containerID="4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7" Jan 22 18:00:56 crc kubenswrapper[5032]: E0122 18:00:56.857878 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7\": container with ID starting with 4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7 not found: ID does not exist" containerID="4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7" Jan 22 18:00:56 crc kubenswrapper[5032]: I0122 18:00:56.857927 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7"} err="failed to get container status \"4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7\": rpc error: code = NotFound desc = could not find container \"4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7\": container with ID starting with 4fc1fd74e8b6f148623704dd79f5a7e54658930a8fcfe76662a8d639416eb8e7 not found: ID does not exist" Jan 22 18:00:58 crc kubenswrapper[5032]: I0122 18:00:58.469544 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="476fed42-a934-4906-867f-852254c463a1" path="/var/lib/kubelet/pods/476fed42-a934-4906-867f-852254c463a1/volumes" Jan 22 18:00:59 crc kubenswrapper[5032]: E0122 18:00:59.881127 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache]" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.166697 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29485081-5xndb"] Jan 22 18:01:00 crc kubenswrapper[5032]: E0122 18:01:00.167496 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="extract-content" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.167515 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="extract-content" Jan 22 18:01:00 crc kubenswrapper[5032]: E0122 18:01:00.167535 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="registry-server" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.167543 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="registry-server" Jan 22 18:01:00 crc kubenswrapper[5032]: E0122 18:01:00.167581 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="extract-utilities" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.167591 5032 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="476fed42-a934-4906-867f-852254c463a1" containerName="extract-utilities" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.167815 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="476fed42-a934-4906-867f-852254c463a1" containerName="registry-server" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.168687 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.182772 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29485081-5xndb"] Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.231032 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96bqh\" (UniqueName: \"kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.231108 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.231326 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.231564 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.333239 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96bqh\" (UniqueName: \"kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.333291 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.333329 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.333424 5032 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.339228 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.339792 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.343891 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.350460 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96bqh\" (UniqueName: \"kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh\") pod \"keystone-cron-29485081-5xndb\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:00 crc kubenswrapper[5032]: I0122 18:01:00.503688 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:01 crc kubenswrapper[5032]: I0122 18:01:01.003400 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29485081-5xndb"] Jan 22 18:01:01 crc kubenswrapper[5032]: W0122 18:01:01.004525 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8f069f5_0e2c_48b1_91ed_b9a551a9a614.slice/crio-2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962 WatchSource:0}: Error finding container 2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962: Status 404 returned error can't find the container with id 2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962 Jan 22 18:01:01 crc kubenswrapper[5032]: I0122 18:01:01.793396 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485081-5xndb" event={"ID":"d8f069f5-0e2c-48b1-91ed-b9a551a9a614","Type":"ContainerStarted","Data":"5f9b7efda68a6f3b03391118df9e250d79f01ce6a8f8f229c7607b1255b27780"} Jan 22 18:01:01 crc kubenswrapper[5032]: I0122 18:01:01.793650 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485081-5xndb" event={"ID":"d8f069f5-0e2c-48b1-91ed-b9a551a9a614","Type":"ContainerStarted","Data":"2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962"} Jan 22 18:01:01 crc kubenswrapper[5032]: I0122 18:01:01.820752 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29485081-5xndb" podStartSLOduration=1.82072363 podStartE2EDuration="1.82072363s" podCreationTimestamp="2026-01-22 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 18:01:01.808271459 +0000 UTC m=+6965.620440510" watchObservedRunningTime="2026-01-22 18:01:01.82072363 +0000 UTC m=+6965.632892681" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.016662 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.017009 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.017062 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.017761 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.017805 5032 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477" gracePeriod=600 Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.538471 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.542359 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.556327 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.597101 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b59h\" (UniqueName: \"kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.597205 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.597266 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.698655 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b59h\" (UniqueName: \"kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.698729 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.698814 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.699436 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " 
pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.700022 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.720847 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b59h\" (UniqueName: \"kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h\") pod \"redhat-marketplace-snvtr\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.811308 5032 generic.go:334] "Generic (PLEG): container finished" podID="d8f069f5-0e2c-48b1-91ed-b9a551a9a614" containerID="5f9b7efda68a6f3b03391118df9e250d79f01ce6a8f8f229c7607b1255b27780" exitCode=0 Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.811393 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485081-5xndb" event={"ID":"d8f069f5-0e2c-48b1-91ed-b9a551a9a614","Type":"ContainerDied","Data":"5f9b7efda68a6f3b03391118df9e250d79f01ce6a8f8f229c7607b1255b27780"} Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.816540 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477" exitCode=0 Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.816579 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477"} Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.816604 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706"} Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.816621 5032 scope.go:117] "RemoveContainer" containerID="b56eaecdf58243d50898780bb017323d00f485021329000877c16b84e738df57" Jan 22 18:01:03 crc kubenswrapper[5032]: I0122 18:01:03.886802 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:04 crc kubenswrapper[5032]: I0122 18:01:04.355414 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:04 crc kubenswrapper[5032]: I0122 18:01:04.826894 5032 generic.go:334] "Generic (PLEG): container finished" podID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerID="0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2" exitCode=0 Jan 22 18:01:04 crc kubenswrapper[5032]: I0122 18:01:04.827286 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerDied","Data":"0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2"} Jan 22 18:01:04 crc kubenswrapper[5032]: I0122 18:01:04.827347 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerStarted","Data":"4a4fc9a2dda19b7ee2d11492091495ae4cae037a083f7c28d9ec2655626b18e9"} Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.222479 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.331481 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle\") pod \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.332552 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys\") pod \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.332622 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96bqh\" (UniqueName: \"kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh\") pod \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.332671 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data\") pod \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\" (UID: \"d8f069f5-0e2c-48b1-91ed-b9a551a9a614\") " Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.340221 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d8f069f5-0e2c-48b1-91ed-b9a551a9a614" (UID: "d8f069f5-0e2c-48b1-91ed-b9a551a9a614"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.340391 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh" (OuterVolumeSpecName: "kube-api-access-96bqh") pod "d8f069f5-0e2c-48b1-91ed-b9a551a9a614" (UID: "d8f069f5-0e2c-48b1-91ed-b9a551a9a614"). 
InnerVolumeSpecName "kube-api-access-96bqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.369281 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8f069f5-0e2c-48b1-91ed-b9a551a9a614" (UID: "d8f069f5-0e2c-48b1-91ed-b9a551a9a614"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.396101 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data" (OuterVolumeSpecName: "config-data") pod "d8f069f5-0e2c-48b1-91ed-b9a551a9a614" (UID: "d8f069f5-0e2c-48b1-91ed-b9a551a9a614"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.435306 5032 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.435349 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96bqh\" (UniqueName: \"kubernetes.io/projected/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-kube-api-access-96bqh\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.435364 5032 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.435379 5032 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f069f5-0e2c-48b1-91ed-b9a551a9a614-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.853200 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29485081-5xndb" event={"ID":"d8f069f5-0e2c-48b1-91ed-b9a551a9a614","Type":"ContainerDied","Data":"2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962"} Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.853253 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2269635cbc402704383a399d02f266529d53f9ae8aa53242651c606b9429a962" Jan 22 18:01:05 crc kubenswrapper[5032]: I0122 18:01:05.853330 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29485081-5xndb" Jan 22 18:01:06 crc kubenswrapper[5032]: I0122 18:01:06.864681 5032 generic.go:334] "Generic (PLEG): container finished" podID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerID="caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de" exitCode=0 Jan 22 18:01:06 crc kubenswrapper[5032]: I0122 18:01:06.864781 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerDied","Data":"caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de"} Jan 22 18:01:07 crc kubenswrapper[5032]: I0122 18:01:07.876710 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerStarted","Data":"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a"} Jan 22 18:01:07 crc kubenswrapper[5032]: I0122 18:01:07.894211 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-snvtr" podStartSLOduration=2.424007977 podStartE2EDuration="4.894191709s" podCreationTimestamp="2026-01-22 18:01:03 +0000 UTC" firstStartedPulling="2026-01-22 18:01:04.83083433 +0000 UTC m=+6968.643003361" lastFinishedPulling="2026-01-22 18:01:07.301018062 +0000 UTC m=+6971.113187093" observedRunningTime="2026-01-22 18:01:07.891641771 +0000 UTC m=+6971.703810812" watchObservedRunningTime="2026-01-22 18:01:07.894191709 +0000 UTC m=+6971.706360740" Jan 22 18:01:10 crc kubenswrapper[5032]: E0122 18:01:10.104460 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache]" Jan 22 18:01:13 crc kubenswrapper[5032]: I0122 18:01:13.887166 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:13 crc kubenswrapper[5032]: I0122 18:01:13.887825 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:13 crc kubenswrapper[5032]: I0122 18:01:13.943758 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:13 crc kubenswrapper[5032]: I0122 18:01:13.994836 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:14 crc kubenswrapper[5032]: I0122 18:01:14.179714 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:15 crc kubenswrapper[5032]: I0122 18:01:15.963803 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-snvtr" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="registry-server" containerID="cri-o://357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a" gracePeriod=2 Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.504706 5032 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.579199 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content\") pod \"1367e29b-5978-4a7e-992b-4e70597c6a0f\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.579273 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities\") pod \"1367e29b-5978-4a7e-992b-4e70597c6a0f\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.579371 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b59h\" (UniqueName: \"kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h\") pod \"1367e29b-5978-4a7e-992b-4e70597c6a0f\" (UID: \"1367e29b-5978-4a7e-992b-4e70597c6a0f\") " Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.580123 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities" (OuterVolumeSpecName: "utilities") pod "1367e29b-5978-4a7e-992b-4e70597c6a0f" (UID: "1367e29b-5978-4a7e-992b-4e70597c6a0f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.582413 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.584740 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h" (OuterVolumeSpecName: "kube-api-access-8b59h") pod "1367e29b-5978-4a7e-992b-4e70597c6a0f" (UID: "1367e29b-5978-4a7e-992b-4e70597c6a0f"). InnerVolumeSpecName "kube-api-access-8b59h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.636283 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1367e29b-5978-4a7e-992b-4e70597c6a0f" (UID: "1367e29b-5978-4a7e-992b-4e70597c6a0f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.684208 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1367e29b-5978-4a7e-992b-4e70597c6a0f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.684264 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b59h\" (UniqueName: \"kubernetes.io/projected/1367e29b-5978-4a7e-992b-4e70597c6a0f-kube-api-access-8b59h\") on node \"crc\" DevicePath \"\"" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.984033 5032 generic.go:334] "Generic (PLEG): container finished" podID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerID="357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a" exitCode=0 Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.984081 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-snvtr" Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.984107 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerDied","Data":"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a"} Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.985339 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-snvtr" event={"ID":"1367e29b-5978-4a7e-992b-4e70597c6a0f","Type":"ContainerDied","Data":"4a4fc9a2dda19b7ee2d11492091495ae4cae037a083f7c28d9ec2655626b18e9"} Jan 22 18:01:16 crc kubenswrapper[5032]: I0122 18:01:16.985369 5032 scope.go:117] "RemoveContainer" containerID="357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.014710 5032 scope.go:117] "RemoveContainer" containerID="caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.027528 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.040260 5032 scope.go:117] "RemoveContainer" containerID="0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.044424 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-snvtr"] Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.083426 5032 scope.go:117] "RemoveContainer" containerID="357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a" Jan 22 18:01:17 crc kubenswrapper[5032]: E0122 18:01:17.083958 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a\": container with ID starting with 357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a not found: ID does not exist" containerID="357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.084020 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a"} err="failed to get container status 
\"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a\": rpc error: code = NotFound desc = could not find container \"357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a\": container with ID starting with 357ab74a38700dcdb0aec0650fbe98874a40f3b4bf1a17d63cfe5d9fa8856a6a not found: ID does not exist" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.084070 5032 scope.go:117] "RemoveContainer" containerID="caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de" Jan 22 18:01:17 crc kubenswrapper[5032]: E0122 18:01:17.084599 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de\": container with ID starting with caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de not found: ID does not exist" containerID="caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.084633 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de"} err="failed to get container status \"caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de\": rpc error: code = NotFound desc = could not find container \"caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de\": container with ID starting with caa66064269e75190e2780708aad9c283519ea2bd66aa3072f786067968a85de not found: ID does not exist" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.084656 5032 scope.go:117] "RemoveContainer" containerID="0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2" Jan 22 18:01:17 crc kubenswrapper[5032]: E0122 18:01:17.085015 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2\": container with ID starting with 0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2 not found: ID does not exist" containerID="0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2" Jan 22 18:01:17 crc kubenswrapper[5032]: I0122 18:01:17.085080 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2"} err="failed to get container status \"0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2\": rpc error: code = NotFound desc = could not find container \"0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2\": container with ID starting with 0f1013fc4173535727779538e071ed6c2988b68d1916e0bad24f04012fe919a2 not found: ID does not exist" Jan 22 18:01:18 crc kubenswrapper[5032]: I0122 18:01:18.473092 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" path="/var/lib/kubelet/pods/1367e29b-5978-4a7e-992b-4e70597c6a0f/volumes" Jan 22 18:01:20 crc kubenswrapper[5032]: E0122 18:01:20.321132 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache]" Jan 22 18:01:30 crc kubenswrapper[5032]: E0122 18:01:30.589174 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache]" Jan 22 18:01:40 crc kubenswrapper[5032]: E0122 18:01:40.907288 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache]" Jan 22 18:01:51 crc kubenswrapper[5032]: E0122 18:01:51.152411 5032 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice/crio-d472eaa10d4fe6651f604290add4cbf0e9152f6961b839435989a48d9c7f978c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod476fed42_a934_4906_867f_852254c463a1.slice\": RecentStats: unable to find data in memory cache]" Jan 22 18:03:03 crc kubenswrapper[5032]: I0122 18:03:03.016568 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:03:03 crc kubenswrapper[5032]: I0122 18:03:03.017330 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:03:33 crc kubenswrapper[5032]: I0122 18:03:33.016360 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:03:33 crc kubenswrapper[5032]: I0122 18:03:33.017379 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.017127 5032 patch_prober.go:28] interesting 
pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.017831 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.017919 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.018904 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.019076 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" gracePeriod=600 Jan 22 18:04:03 crc kubenswrapper[5032]: E0122 18:04:03.142500 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.600344 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" exitCode=0 Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.600414 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706"} Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.600466 5032 scope.go:117] "RemoveContainer" containerID="072818648ffa3dd9f9b5b2015d59fcee6839118163a747be31afa83595cfb477" Jan 22 18:04:03 crc kubenswrapper[5032]: I0122 18:04:03.601833 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:04:03 crc kubenswrapper[5032]: E0122 18:04:03.602186 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:04:14 crc kubenswrapper[5032]: I0122 18:04:14.455343 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:04:14 crc kubenswrapper[5032]: E0122 18:04:14.456135 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:04:28 crc kubenswrapper[5032]: I0122 18:04:28.454437 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:04:28 crc kubenswrapper[5032]: E0122 18:04:28.455608 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:04:39 crc kubenswrapper[5032]: I0122 18:04:39.453962 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:04:39 crc kubenswrapper[5032]: E0122 18:04:39.454682 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:04:53 crc kubenswrapper[5032]: I0122 18:04:53.454753 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:04:53 crc kubenswrapper[5032]: E0122 18:04:53.455563 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:05:04 crc kubenswrapper[5032]: I0122 18:05:04.455379 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:05:04 crc kubenswrapper[5032]: E0122 18:05:04.456404 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:05:19 crc kubenswrapper[5032]: I0122 18:05:19.454534 5032 
scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:05:19 crc kubenswrapper[5032]: E0122 18:05:19.455437 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:05:30 crc kubenswrapper[5032]: I0122 18:05:30.455266 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:05:30 crc kubenswrapper[5032]: E0122 18:05:30.456488 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:05:43 crc kubenswrapper[5032]: I0122 18:05:43.455565 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:05:43 crc kubenswrapper[5032]: E0122 18:05:43.456308 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:05:55 crc kubenswrapper[5032]: I0122 18:05:55.454801 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:05:55 crc kubenswrapper[5032]: E0122 18:05:55.455547 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:06:08 crc kubenswrapper[5032]: I0122 18:06:08.455104 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:06:08 crc kubenswrapper[5032]: E0122 18:06:08.457335 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:06:23 crc kubenswrapper[5032]: I0122 18:06:23.454679 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:06:23 crc kubenswrapper[5032]: E0122 18:06:23.455472 5032 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:06:35 crc kubenswrapper[5032]: I0122 18:06:35.455681 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:06:35 crc kubenswrapper[5032]: E0122 18:06:35.456450 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:06:46 crc kubenswrapper[5032]: I0122 18:06:46.461580 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:06:46 crc kubenswrapper[5032]: E0122 18:06:46.462266 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:06:59 crc kubenswrapper[5032]: I0122 18:06:59.455092 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:06:59 crc kubenswrapper[5032]: E0122 18:06:59.456829 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:07:10 crc kubenswrapper[5032]: I0122 18:07:10.465746 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:07:10 crc kubenswrapper[5032]: E0122 18:07:10.466697 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.867256 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:18 crc kubenswrapper[5032]: E0122 18:07:18.868332 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="extract-content" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868349 
5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="extract-content" Jan 22 18:07:18 crc kubenswrapper[5032]: E0122 18:07:18.868382 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8f069f5-0e2c-48b1-91ed-b9a551a9a614" containerName="keystone-cron" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868389 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8f069f5-0e2c-48b1-91ed-b9a551a9a614" containerName="keystone-cron" Jan 22 18:07:18 crc kubenswrapper[5032]: E0122 18:07:18.868414 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="registry-server" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868422 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="registry-server" Jan 22 18:07:18 crc kubenswrapper[5032]: E0122 18:07:18.868468 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="extract-utilities" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868477 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="extract-utilities" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868709 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8f069f5-0e2c-48b1-91ed-b9a551a9a614" containerName="keystone-cron" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.868735 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="1367e29b-5978-4a7e-992b-4e70597c6a0f" containerName="registry-server" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.873218 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:18 crc kubenswrapper[5032]: I0122 18:07:18.881302 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.029387 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.029538 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.029685 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66shg\" (UniqueName: \"kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.131553 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.132031 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66shg\" (UniqueName: \"kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.132168 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.132179 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.132616 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.156600 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-66shg\" (UniqueName: \"kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg\") pod \"certified-operators-7dhhj\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.213658 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:19 crc kubenswrapper[5032]: I0122 18:07:19.696843 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:20 crc kubenswrapper[5032]: I0122 18:07:20.553604 5032 generic.go:334] "Generic (PLEG): container finished" podID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerID="937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678" exitCode=0 Jan 22 18:07:20 crc kubenswrapper[5032]: I0122 18:07:20.553660 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerDied","Data":"937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678"} Jan 22 18:07:20 crc kubenswrapper[5032]: I0122 18:07:20.553966 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerStarted","Data":"2b981b175c11167006b853d4a1c6002a8a3701e71f482904728d3b6e9d76652c"} Jan 22 18:07:20 crc kubenswrapper[5032]: I0122 18:07:20.555600 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 18:07:23 crc kubenswrapper[5032]: I0122 18:07:23.455152 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:07:23 crc kubenswrapper[5032]: E0122 18:07:23.455795 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:07:23 crc kubenswrapper[5032]: I0122 18:07:23.582363 5032 generic.go:334] "Generic (PLEG): container finished" podID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerID="886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd" exitCode=0 Jan 22 18:07:23 crc kubenswrapper[5032]: I0122 18:07:23.582441 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerDied","Data":"886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd"} Jan 22 18:07:24 crc kubenswrapper[5032]: I0122 18:07:24.592434 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerStarted","Data":"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a"} Jan 22 18:07:24 crc kubenswrapper[5032]: I0122 18:07:24.617733 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7dhhj" podStartSLOduration=3.15111686 
podStartE2EDuration="6.617713713s" podCreationTimestamp="2026-01-22 18:07:18 +0000 UTC" firstStartedPulling="2026-01-22 18:07:20.555331561 +0000 UTC m=+7344.367500592" lastFinishedPulling="2026-01-22 18:07:24.021928414 +0000 UTC m=+7347.834097445" observedRunningTime="2026-01-22 18:07:24.611937702 +0000 UTC m=+7348.424106753" watchObservedRunningTime="2026-01-22 18:07:24.617713713 +0000 UTC m=+7348.429882744" Jan 22 18:07:29 crc kubenswrapper[5032]: I0122 18:07:29.213845 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:29 crc kubenswrapper[5032]: I0122 18:07:29.214445 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:29 crc kubenswrapper[5032]: I0122 18:07:29.281601 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:30 crc kubenswrapper[5032]: I0122 18:07:30.126548 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:30 crc kubenswrapper[5032]: I0122 18:07:30.170637 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.088262 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7dhhj" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="registry-server" containerID="cri-o://375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a" gracePeriod=2 Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.600238 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.723722 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content\") pod \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.724048 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities\") pod \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.724079 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66shg\" (UniqueName: \"kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg\") pod \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\" (UID: \"a2cd7cd4-6024-4822-bf56-91fe8dd817af\") " Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.725211 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities" (OuterVolumeSpecName: "utilities") pod "a2cd7cd4-6024-4822-bf56-91fe8dd817af" (UID: "a2cd7cd4-6024-4822-bf56-91fe8dd817af"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.729706 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg" (OuterVolumeSpecName: "kube-api-access-66shg") pod "a2cd7cd4-6024-4822-bf56-91fe8dd817af" (UID: "a2cd7cd4-6024-4822-bf56-91fe8dd817af"). InnerVolumeSpecName "kube-api-access-66shg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.779366 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2cd7cd4-6024-4822-bf56-91fe8dd817af" (UID: "a2cd7cd4-6024-4822-bf56-91fe8dd817af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.826553 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.826587 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cd7cd4-6024-4822-bf56-91fe8dd817af-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:07:32 crc kubenswrapper[5032]: I0122 18:07:32.826600 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66shg\" (UniqueName: \"kubernetes.io/projected/a2cd7cd4-6024-4822-bf56-91fe8dd817af-kube-api-access-66shg\") on node \"crc\" DevicePath \"\"" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.098938 5032 generic.go:334] "Generic (PLEG): container finished" podID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerID="375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a" exitCode=0 Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.098989 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerDied","Data":"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a"} Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.099022 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7dhhj" event={"ID":"a2cd7cd4-6024-4822-bf56-91fe8dd817af","Type":"ContainerDied","Data":"2b981b175c11167006b853d4a1c6002a8a3701e71f482904728d3b6e9d76652c"} Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.099043 5032 scope.go:117] "RemoveContainer" containerID="375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.098998 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7dhhj" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.146368 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.154547 5032 scope.go:117] "RemoveContainer" containerID="886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.158684 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7dhhj"] Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.178022 5032 scope.go:117] "RemoveContainer" containerID="937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.234754 5032 scope.go:117] "RemoveContainer" containerID="375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a" Jan 22 18:07:33 crc kubenswrapper[5032]: E0122 18:07:33.235396 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a\": container with ID starting with 375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a not found: ID does not exist" containerID="375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.235474 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a"} err="failed to get container status \"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a\": rpc error: code = NotFound desc = could not find container \"375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a\": container with ID starting with 375de53703ea6cf99f5547f58f7669de5d99a4b5cc9eeb98a29ecde08b82746a not found: ID does not exist" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.235510 5032 scope.go:117] "RemoveContainer" containerID="886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd" Jan 22 18:07:33 crc kubenswrapper[5032]: E0122 18:07:33.235951 5032 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd\": container with ID starting with 886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd not found: ID does not exist" containerID="886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.235993 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd"} err="failed to get container status \"886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd\": rpc error: code = NotFound desc = could not find container \"886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd\": container with ID starting with 886b52b564dbfc9a0ea02c12d68370862dffc5e2fd4ba18c382fce9ce1d460bd not found: ID does not exist" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.236016 5032 scope.go:117] "RemoveContainer" containerID="937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678" Jan 22 18:07:33 crc kubenswrapper[5032]: E0122 18:07:33.236279 5032 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678\": container with ID starting with 937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678 not found: ID does not exist" containerID="937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678" Jan 22 18:07:33 crc kubenswrapper[5032]: I0122 18:07:33.236307 5032 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678"} err="failed to get container status \"937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678\": rpc error: code = NotFound desc = could not find container \"937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678\": container with ID starting with 937fc3453c4797b9e386d60143b3fda8213fab6c9027f47d4f5e465fb22ec678 not found: ID does not exist" Jan 22 18:07:34 crc kubenswrapper[5032]: I0122 18:07:34.466122 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" path="/var/lib/kubelet/pods/a2cd7cd4-6024-4822-bf56-91fe8dd817af/volumes" Jan 22 18:07:35 crc kubenswrapper[5032]: I0122 18:07:35.455754 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:07:35 crc kubenswrapper[5032]: E0122 18:07:35.456174 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:07:47 crc kubenswrapper[5032]: I0122 18:07:47.455034 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:07:47 crc kubenswrapper[5032]: E0122 18:07:47.455761 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:07:58 crc kubenswrapper[5032]: I0122 18:07:58.454777 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:07:58 crc kubenswrapper[5032]: E0122 18:07:58.455596 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:08:12 crc kubenswrapper[5032]: I0122 18:08:12.454844 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:08:12 crc kubenswrapper[5032]: E0122 18:08:12.455630 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.969037 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rcjg4/must-gather-vwqph"] Jan 22 18:08:18 crc kubenswrapper[5032]: E0122 18:08:18.970062 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="extract-utilities" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.970075 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="extract-utilities" Jan 22 18:08:18 crc kubenswrapper[5032]: E0122 18:08:18.970096 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="extract-content" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.970102 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="extract-content" Jan 22 18:08:18 crc kubenswrapper[5032]: E0122 18:08:18.970139 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="registry-server" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.970145 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="registry-server" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.970406 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2cd7cd4-6024-4822-bf56-91fe8dd817af" containerName="registry-server" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.971710 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.973681 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rcjg4"/"openshift-service-ca.crt" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.973888 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rcjg4"/"default-dockercfg-pvxr2" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.974109 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rcjg4"/"kube-root-ca.crt" Jan 22 18:08:18 crc kubenswrapper[5032]: I0122 18:08:18.993067 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rcjg4/must-gather-vwqph"] Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.069734 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d9g5\" (UniqueName: \"kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.069809 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.171574 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d9g5\" (UniqueName: \"kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.171911 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.172335 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.190973 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d9g5\" (UniqueName: \"kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5\") pod \"must-gather-vwqph\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.292910 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:08:19 crc kubenswrapper[5032]: I0122 18:08:19.746442 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rcjg4/must-gather-vwqph"] Jan 22 18:08:20 crc kubenswrapper[5032]: I0122 18:08:20.517123 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/must-gather-vwqph" event={"ID":"2cad7eb8-5e19-45f7-850a-a931d5d3412b","Type":"ContainerStarted","Data":"5148351fd009b5d8a9eca391851ec23d23f5f5db0725b36d1d6aa38e28aa18da"} Jan 22 18:08:27 crc kubenswrapper[5032]: I0122 18:08:27.454294 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:08:27 crc kubenswrapper[5032]: E0122 18:08:27.455011 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:08:28 crc kubenswrapper[5032]: I0122 18:08:28.594334 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/must-gather-vwqph" event={"ID":"2cad7eb8-5e19-45f7-850a-a931d5d3412b","Type":"ContainerStarted","Data":"b86e35f2060dc28f17c26d20cb80c892ab419b996f2e7474def032cc742b9ad5"} Jan 22 18:08:28 crc kubenswrapper[5032]: I0122 18:08:28.594916 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/must-gather-vwqph" event={"ID":"2cad7eb8-5e19-45f7-850a-a931d5d3412b","Type":"ContainerStarted","Data":"3fbf036c96315838f89b57355b8c4f43f61074ce46654c5ced72bf5b507fb07a"} Jan 22 18:08:28 crc kubenswrapper[5032]: I0122 18:08:28.614449 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rcjg4/must-gather-vwqph" podStartSLOduration=2.49116696 podStartE2EDuration="10.614427947s" podCreationTimestamp="2026-01-22 18:08:18 +0000 UTC" firstStartedPulling="2026-01-22 18:08:19.745771752 +0000 UTC m=+7403.557940793" lastFinishedPulling="2026-01-22 18:08:27.869032749 +0000 UTC m=+7411.681201780" observedRunningTime="2026-01-22 18:08:28.608343788 +0000 UTC m=+7412.420512839" watchObservedRunningTime="2026-01-22 18:08:28.614427947 +0000 UTC m=+7412.426596968" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.192262 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.195612 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.207481 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.364037 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.364303 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp4jl\" (UniqueName: \"kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.364590 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.467045 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.467126 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.467234 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp4jl\" (UniqueName: \"kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.468233 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.468535 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.497036 5032 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-xwswh"] Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.497354 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp4jl\" (UniqueName: \"kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl\") pod \"community-operators-mhtqc\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.498660 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.521261 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.670974 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.671236 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ghnd\" (UniqueName: \"kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.775062 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.775132 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ghnd\" (UniqueName: \"kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.775632 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.799982 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ghnd\" (UniqueName: \"kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd\") pod \"crc-debug-xwswh\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:33 crc kubenswrapper[5032]: I0122 18:08:33.958025 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:08:34 crc kubenswrapper[5032]: I0122 18:08:34.147687 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:34 crc kubenswrapper[5032]: I0122 18:08:34.664626 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" event={"ID":"a7dd4553-3dc6-40c1-baa4-b4191f00e838","Type":"ContainerStarted","Data":"c2dae19f559d11df65a2128bdb80b07dd5ab67384fae9f33506ee6f10ede7422"} Jan 22 18:08:34 crc kubenswrapper[5032]: I0122 18:08:34.666441 5032 generic.go:334] "Generic (PLEG): container finished" podID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerID="19ba4ba07611844718fa90656656d674f9ddbe594a601c5a9f17101e48e4cb0a" exitCode=0 Jan 22 18:08:34 crc kubenswrapper[5032]: I0122 18:08:34.666483 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerDied","Data":"19ba4ba07611844718fa90656656d674f9ddbe594a601c5a9f17101e48e4cb0a"} Jan 22 18:08:34 crc kubenswrapper[5032]: I0122 18:08:34.666510 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerStarted","Data":"81cf3c9d4b14f4da4a242a8da47e792225c7122985c8e1a66ad66a8dba878dda"} Jan 22 18:08:36 crc kubenswrapper[5032]: I0122 18:08:36.686698 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerStarted","Data":"3b927d769dfdebbafdbc7a83047b51c9ceb5414690f6702ff93d9e5e48fc59b3"} Jan 22 18:08:37 crc kubenswrapper[5032]: I0122 18:08:37.699808 5032 generic.go:334] "Generic (PLEG): container finished" podID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerID="3b927d769dfdebbafdbc7a83047b51c9ceb5414690f6702ff93d9e5e48fc59b3" exitCode=0 Jan 22 18:08:37 crc kubenswrapper[5032]: I0122 18:08:37.699915 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerDied","Data":"3b927d769dfdebbafdbc7a83047b51c9ceb5414690f6702ff93d9e5e48fc59b3"} Jan 22 18:08:39 crc kubenswrapper[5032]: I0122 18:08:39.722772 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerStarted","Data":"d1fa39cc0faedb534c83b919c4b1efec177fafcb7cc1a6077c405a5f24e09f84"} Jan 22 18:08:39 crc kubenswrapper[5032]: I0122 18:08:39.756073 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mhtqc" podStartSLOduration=2.922837446 podStartE2EDuration="6.75604986s" podCreationTimestamp="2026-01-22 18:08:33 +0000 UTC" firstStartedPulling="2026-01-22 18:08:34.668245205 +0000 UTC m=+7418.480414236" lastFinishedPulling="2026-01-22 18:08:38.501457619 +0000 UTC m=+7422.313626650" observedRunningTime="2026-01-22 18:08:39.742037865 +0000 UTC m=+7423.554206896" watchObservedRunningTime="2026-01-22 18:08:39.75604986 +0000 UTC m=+7423.568218891" Jan 22 18:08:41 crc kubenswrapper[5032]: I0122 18:08:41.455011 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:08:41 crc kubenswrapper[5032]: E0122 
18:08:41.455939 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:08:43 crc kubenswrapper[5032]: I0122 18:08:43.522251 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:43 crc kubenswrapper[5032]: I0122 18:08:43.522615 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:44 crc kubenswrapper[5032]: I0122 18:08:44.012014 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:44 crc kubenswrapper[5032]: I0122 18:08:44.059525 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:44 crc kubenswrapper[5032]: I0122 18:08:44.254276 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:45 crc kubenswrapper[5032]: I0122 18:08:45.770741 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mhtqc" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="registry-server" containerID="cri-o://d1fa39cc0faedb534c83b919c4b1efec177fafcb7cc1a6077c405a5f24e09f84" gracePeriod=2 Jan 22 18:08:46 crc kubenswrapper[5032]: I0122 18:08:46.783643 5032 generic.go:334] "Generic (PLEG): container finished" podID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerID="d1fa39cc0faedb534c83b919c4b1efec177fafcb7cc1a6077c405a5f24e09f84" exitCode=0 Jan 22 18:08:46 crc kubenswrapper[5032]: I0122 18:08:46.783732 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerDied","Data":"d1fa39cc0faedb534c83b919c4b1efec177fafcb7cc1a6077c405a5f24e09f84"} Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.588350 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.672101 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content\") pod \"49e7da13-acda-44b5-856a-93b87cae0eb1\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.672155 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities\") pod \"49e7da13-acda-44b5-856a-93b87cae0eb1\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.672362 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp4jl\" (UniqueName: \"kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl\") pod \"49e7da13-acda-44b5-856a-93b87cae0eb1\" (UID: \"49e7da13-acda-44b5-856a-93b87cae0eb1\") " Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.674405 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities" (OuterVolumeSpecName: "utilities") pod "49e7da13-acda-44b5-856a-93b87cae0eb1" (UID: "49e7da13-acda-44b5-856a-93b87cae0eb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.686187 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl" (OuterVolumeSpecName: "kube-api-access-rp4jl") pod "49e7da13-acda-44b5-856a-93b87cae0eb1" (UID: "49e7da13-acda-44b5-856a-93b87cae0eb1"). InnerVolumeSpecName "kube-api-access-rp4jl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.732598 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49e7da13-acda-44b5-856a-93b87cae0eb1" (UID: "49e7da13-acda-44b5-856a-93b87cae0eb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.774669 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp4jl\" (UniqueName: \"kubernetes.io/projected/49e7da13-acda-44b5-856a-93b87cae0eb1-kube-api-access-rp4jl\") on node \"crc\" DevicePath \"\"" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.775000 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.775012 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e7da13-acda-44b5-856a-93b87cae0eb1-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.809966 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" event={"ID":"a7dd4553-3dc6-40c1-baa4-b4191f00e838","Type":"ContainerStarted","Data":"aefa8ee0765983450b208bd7e7884077ff3b206471168c74e7681a4bc835a18e"} Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.818033 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhtqc" event={"ID":"49e7da13-acda-44b5-856a-93b87cae0eb1","Type":"ContainerDied","Data":"81cf3c9d4b14f4da4a242a8da47e792225c7122985c8e1a66ad66a8dba878dda"} Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.818093 5032 scope.go:117] "RemoveContainer" containerID="d1fa39cc0faedb534c83b919c4b1efec177fafcb7cc1a6077c405a5f24e09f84" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.818293 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhtqc" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.839189 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" podStartSLOduration=1.481788119 podStartE2EDuration="15.839160781s" podCreationTimestamp="2026-01-22 18:08:33 +0000 UTC" firstStartedPulling="2026-01-22 18:08:33.997991946 +0000 UTC m=+7417.810160977" lastFinishedPulling="2026-01-22 18:08:48.355364608 +0000 UTC m=+7432.167533639" observedRunningTime="2026-01-22 18:08:48.827623631 +0000 UTC m=+7432.639792662" watchObservedRunningTime="2026-01-22 18:08:48.839160781 +0000 UTC m=+7432.651329812" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.876055 5032 scope.go:117] "RemoveContainer" containerID="3b927d769dfdebbafdbc7a83047b51c9ceb5414690f6702ff93d9e5e48fc59b3" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.902982 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.908307 5032 scope.go:117] "RemoveContainer" containerID="19ba4ba07611844718fa90656656d674f9ddbe594a601c5a9f17101e48e4cb0a" Jan 22 18:08:48 crc kubenswrapper[5032]: I0122 18:08:48.921400 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mhtqc"] Jan 22 18:08:50 crc kubenswrapper[5032]: I0122 18:08:50.465461 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" path="/var/lib/kubelet/pods/49e7da13-acda-44b5-856a-93b87cae0eb1/volumes" Jan 22 18:08:53 crc kubenswrapper[5032]: I0122 18:08:53.455350 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:08:53 crc kubenswrapper[5032]: E0122 18:08:53.456571 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:09:08 crc kubenswrapper[5032]: I0122 18:09:08.457062 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:09:09 crc kubenswrapper[5032]: I0122 18:09:09.021655 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5"} Jan 22 18:09:42 crc kubenswrapper[5032]: I0122 18:09:42.295044 5032 generic.go:334] "Generic (PLEG): container finished" podID="a7dd4553-3dc6-40c1-baa4-b4191f00e838" containerID="aefa8ee0765983450b208bd7e7884077ff3b206471168c74e7681a4bc835a18e" exitCode=0 Jan 22 18:09:42 crc kubenswrapper[5032]: I0122 18:09:42.295156 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" event={"ID":"a7dd4553-3dc6-40c1-baa4-b4191f00e838","Type":"ContainerDied","Data":"aefa8ee0765983450b208bd7e7884077ff3b206471168c74e7681a4bc835a18e"} Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.403212 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.441131 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-xwswh"] Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.449154 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-xwswh"] Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.515378 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host\") pod \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.515468 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ghnd\" (UniqueName: \"kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd\") pod \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\" (UID: \"a7dd4553-3dc6-40c1-baa4-b4191f00e838\") " Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.515573 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host" (OuterVolumeSpecName: "host") pod "a7dd4553-3dc6-40c1-baa4-b4191f00e838" (UID: "a7dd4553-3dc6-40c1-baa4-b4191f00e838"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.515945 5032 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a7dd4553-3dc6-40c1-baa4-b4191f00e838-host\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.521200 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd" (OuterVolumeSpecName: "kube-api-access-4ghnd") pod "a7dd4553-3dc6-40c1-baa4-b4191f00e838" (UID: "a7dd4553-3dc6-40c1-baa4-b4191f00e838"). InnerVolumeSpecName "kube-api-access-4ghnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:09:43 crc kubenswrapper[5032]: I0122 18:09:43.617614 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ghnd\" (UniqueName: \"kubernetes.io/projected/a7dd4553-3dc6-40c1-baa4-b4191f00e838-kube-api-access-4ghnd\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.314783 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2dae19f559d11df65a2128bdb80b07dd5ab67384fae9f33506ee6f10ede7422" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.314813 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-xwswh" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.465903 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7dd4553-3dc6-40c1-baa4-b4191f00e838" path="/var/lib/kubelet/pods/a7dd4553-3dc6-40c1-baa4-b4191f00e838/volumes" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.643632 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-nrkwj"] Jan 22 18:09:44 crc kubenswrapper[5032]: E0122 18:09:44.644226 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="extract-utilities" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644253 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="extract-utilities" Jan 22 18:09:44 crc kubenswrapper[5032]: E0122 18:09:44.644269 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7dd4553-3dc6-40c1-baa4-b4191f00e838" containerName="container-00" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644279 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7dd4553-3dc6-40c1-baa4-b4191f00e838" containerName="container-00" Jan 22 18:09:44 crc kubenswrapper[5032]: E0122 18:09:44.644297 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="extract-content" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644307 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="extract-content" Jan 22 18:09:44 crc kubenswrapper[5032]: E0122 18:09:44.644332 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="registry-server" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644340 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="registry-server" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644562 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="49e7da13-acda-44b5-856a-93b87cae0eb1" containerName="registry-server" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.644615 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7dd4553-3dc6-40c1-baa4-b4191f00e838" containerName="container-00" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.645353 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.738505 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppd6m\" (UniqueName: \"kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.738796 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.840840 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppd6m\" (UniqueName: \"kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.841000 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.841152 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.866192 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppd6m\" (UniqueName: \"kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m\") pod \"crc-debug-nrkwj\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:44 crc kubenswrapper[5032]: I0122 18:09:44.965399 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:45 crc kubenswrapper[5032]: W0122 18:09:45.003794 5032 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9bc2d2c_06f1_49fc_bf5f_44e9d541e2a4.slice/crio-c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251 WatchSource:0}: Error finding container c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251: Status 404 returned error can't find the container with id c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251 Jan 22 18:09:45 crc kubenswrapper[5032]: I0122 18:09:45.324177 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" event={"ID":"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4","Type":"ContainerStarted","Data":"c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251"} Jan 22 18:09:47 crc kubenswrapper[5032]: I0122 18:09:47.349248 5032 generic.go:334] "Generic (PLEG): container finished" podID="f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" containerID="f57812551338c4124916e1cf506d284d7fd3c60e1a389557475e6917c70702d2" exitCode=0 Jan 22 18:09:47 crc kubenswrapper[5032]: I0122 18:09:47.349473 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" event={"ID":"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4","Type":"ContainerDied","Data":"f57812551338c4124916e1cf506d284d7fd3c60e1a389557475e6917c70702d2"} Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.471912 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.604946 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppd6m\" (UniqueName: \"kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m\") pod \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.605128 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host\") pod \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\" (UID: \"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4\") " Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.605225 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host" (OuterVolumeSpecName: "host") pod "f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" (UID: "f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.605658 5032 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-host\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.610585 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m" (OuterVolumeSpecName: "kube-api-access-ppd6m") pod "f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" (UID: "f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4"). InnerVolumeSpecName "kube-api-access-ppd6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:09:48 crc kubenswrapper[5032]: I0122 18:09:48.706947 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppd6m\" (UniqueName: \"kubernetes.io/projected/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4-kube-api-access-ppd6m\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:49 crc kubenswrapper[5032]: I0122 18:09:49.367649 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" event={"ID":"f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4","Type":"ContainerDied","Data":"c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251"} Jan 22 18:09:49 crc kubenswrapper[5032]: I0122 18:09:49.367701 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c32930beb4b6cdea7a030fc47872f78d3569a89f437b92715b969f285cad8251" Jan 22 18:09:49 crc kubenswrapper[5032]: I0122 18:09:49.367774 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-nrkwj" Jan 22 18:09:49 crc kubenswrapper[5032]: I0122 18:09:49.747233 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-nrkwj"] Jan 22 18:09:49 crc kubenswrapper[5032]: I0122 18:09:49.756817 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-nrkwj"] Jan 22 18:09:50 crc kubenswrapper[5032]: I0122 18:09:50.468727 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" path="/var/lib/kubelet/pods/f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4/volumes" Jan 22 18:09:50 crc kubenswrapper[5032]: I0122 18:09:50.915372 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-5gnpc"] Jan 22 18:09:50 crc kubenswrapper[5032]: E0122 18:09:50.915955 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" containerName="container-00" Jan 22 18:09:50 crc kubenswrapper[5032]: I0122 18:09:50.915968 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" containerName="container-00" Jan 22 18:09:50 crc kubenswrapper[5032]: I0122 18:09:50.916167 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9bc2d2c-06f1-49fc-bf5f-44e9d541e2a4" containerName="container-00" Jan 22 18:09:50 crc kubenswrapper[5032]: I0122 18:09:50.916738 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.057625 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.058128 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kwb9\" (UniqueName: \"kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.160130 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kwb9\" (UniqueName: \"kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.160299 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.160480 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.201630 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kwb9\" (UniqueName: \"kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9\") pod \"crc-debug-5gnpc\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.242495 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:51 crc kubenswrapper[5032]: I0122 18:09:51.382347 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" event={"ID":"6a360698-16bf-468f-9bfd-51cdb4c9fb72","Type":"ContainerStarted","Data":"1c36c056f5206ea4c395ad431f6e362f6f659bba5610468e59cb6f091e4dd201"} Jan 22 18:09:52 crc kubenswrapper[5032]: I0122 18:09:52.392692 5032 generic.go:334] "Generic (PLEG): container finished" podID="6a360698-16bf-468f-9bfd-51cdb4c9fb72" containerID="8feb936e1fbf07b8b05666be76612112d427dd6bf00e8710800d395b04defb3a" exitCode=0 Jan 22 18:09:52 crc kubenswrapper[5032]: I0122 18:09:52.392765 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" event={"ID":"6a360698-16bf-468f-9bfd-51cdb4c9fb72","Type":"ContainerDied","Data":"8feb936e1fbf07b8b05666be76612112d427dd6bf00e8710800d395b04defb3a"} Jan 22 18:09:52 crc kubenswrapper[5032]: I0122 18:09:52.436325 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-5gnpc"] Jan 22 18:09:52 crc kubenswrapper[5032]: I0122 18:09:52.444651 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rcjg4/crc-debug-5gnpc"] Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.504502 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.613877 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kwb9\" (UniqueName: \"kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9\") pod \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.613922 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host\") pod \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\" (UID: \"6a360698-16bf-468f-9bfd-51cdb4c9fb72\") " Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.614056 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host" (OuterVolumeSpecName: "host") pod "6a360698-16bf-468f-9bfd-51cdb4c9fb72" (UID: "6a360698-16bf-468f-9bfd-51cdb4c9fb72"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.614446 5032 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a360698-16bf-468f-9bfd-51cdb4c9fb72-host\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.619571 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9" (OuterVolumeSpecName: "kube-api-access-2kwb9") pod "6a360698-16bf-468f-9bfd-51cdb4c9fb72" (UID: "6a360698-16bf-468f-9bfd-51cdb4c9fb72"). InnerVolumeSpecName "kube-api-access-2kwb9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:09:53 crc kubenswrapper[5032]: I0122 18:09:53.716341 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kwb9\" (UniqueName: \"kubernetes.io/projected/6a360698-16bf-468f-9bfd-51cdb4c9fb72-kube-api-access-2kwb9\") on node \"crc\" DevicePath \"\"" Jan 22 18:09:54 crc kubenswrapper[5032]: I0122 18:09:54.410944 5032 scope.go:117] "RemoveContainer" containerID="8feb936e1fbf07b8b05666be76612112d427dd6bf00e8710800d395b04defb3a" Jan 22 18:09:54 crc kubenswrapper[5032]: I0122 18:09:54.410987 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rcjg4/crc-debug-5gnpc" Jan 22 18:09:54 crc kubenswrapper[5032]: I0122 18:09:54.465666 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a360698-16bf-468f-9bfd-51cdb4c9fb72" path="/var/lib/kubelet/pods/6a360698-16bf-468f-9bfd-51cdb4c9fb72/volumes" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.132159 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5fb7f99d8d-bm84s_48d84a1b-977f-4127-b0ed-cdf2a93ad6e4/barbican-api/0.log" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.291770 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5fb7f99d8d-bm84s_48d84a1b-977f-4127-b0ed-cdf2a93ad6e4/barbican-api-log/0.log" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.379711 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-bdb85dc66-qzsm2_595b4bfb-0e2f-41d7-88b1-8aabdb432b2e/barbican-keystone-listener/0.log" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.651645 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6b55c79c5-6tv8n_c4d1b24b-8e40-4f2c-9948-55faab954707/barbican-worker/0.log" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.667999 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6b55c79c5-6tv8n_c4d1b24b-8e40-4f2c-9948-55faab954707/barbican-worker-log/0.log" Jan 22 18:10:10 crc kubenswrapper[5032]: I0122 18:10:10.776233 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-bdb85dc66-qzsm2_595b4bfb-0e2f-41d7-88b1-8aabdb432b2e/barbican-keystone-listener-log/0.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.001289 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-p4lbl_547c061b-8a80-410b-b177-46281b19211c/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.188197 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/ceilometer-central-agent/1.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.272230 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/ceilometer-central-agent/0.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.341661 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/ceilometer-notification-agent/1.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.355615 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/ceilometer-notification-agent/0.log" Jan 22 
18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.409232 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/proxy-httpd/0.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.569624 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_89f96057-2096-47e7-88c3-bdeab76e4fe6/sg-core/0.log" Jan 22 18:10:11 crc kubenswrapper[5032]: I0122 18:10:11.785553 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e831a62a-4003-41ec-b3ac-ba0d29a91d95/cinder-scheduler/1.log" Jan 22 18:10:12 crc kubenswrapper[5032]: I0122 18:10:12.313838 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e831a62a-4003-41ec-b3ac-ba0d29a91d95/cinder-scheduler/0.log" Jan 22 18:10:12 crc kubenswrapper[5032]: I0122 18:10:12.320957 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4771dd6f-b39e-4e0a-9539-52c3b8a80db3/cinder-api-log/0.log" Jan 22 18:10:12 crc kubenswrapper[5032]: I0122 18:10:12.435118 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e831a62a-4003-41ec-b3ac-ba0d29a91d95/probe/0.log" Jan 22 18:10:12 crc kubenswrapper[5032]: I0122 18:10:12.479832 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4771dd6f-b39e-4e0a-9539-52c3b8a80db3/cinder-api/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.008540 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-9d2fh_d59138da-36dc-47f0-81c0-cfc89b9a7203/init/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.008641 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-9d2fh_d59138da-36dc-47f0-81c0-cfc89b9a7203/init/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.022626 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-qv94x_8b6774b8-1373-4f87-980c-18189e328582/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.025211 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-j284n_d6ff40ca-31db-47a4-8a8a-ea38b9d15e8c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.307804 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-msxgt_b5c32cc3-0b61-403e-ac6d-fcec5330aec1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.409960 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-9d2fh_d59138da-36dc-47f0-81c0-cfc89b9a7203/dnsmasq-dns/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.512044 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_f0f74898-1fad-4f88-896b-9babae7beba1/glance-httpd/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.549947 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_f0f74898-1fad-4f88-896b-9babae7beba1/glance-log/0.log" Jan 22 18:10:13 crc kubenswrapper[5032]: I0122 18:10:13.690675 5032 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_28e61081-98f8-4f20-9086-5ff5d5bff90c/glance-log/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.190226 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_28e61081-98f8-4f20-9086-5ff5d5bff90c/glance-httpd/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.268850 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c7b5bf75b-t5dp6_f9991301-f3df-4fe5-872f-dfba0842580e/horizon/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.315904 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-fqhqd_1f8f814b-117a-4c75-a79f-bec5d67320e9/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.635916 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-9nk97_3843c5b3-e955-466a-8341-8e13e817172c/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.761389 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29485021-rk9mk_0330603b-2b77-4011-a1e7-ff93c8c9c36b/keystone-cron/0.log" Jan 22 18:10:15 crc kubenswrapper[5032]: I0122 18:10:15.847835 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29485081-5xndb_d8f069f5-0e2c-48b1-91ed-b9a551a9a614/keystone-cron/0.log" Jan 22 18:10:16 crc kubenswrapper[5032]: I0122 18:10:16.086248 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c4751594-9c2d-4f14-8812-8e01b22ab7ee/kube-state-metrics/0.log" Jan 22 18:10:16 crc kubenswrapper[5032]: I0122 18:10:16.126458 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c7b5bf75b-t5dp6_f9991301-f3df-4fe5-872f-dfba0842580e/horizon-log/0.log" Jan 22 18:10:16 crc kubenswrapper[5032]: I0122 18:10:16.341445 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-7dgq5_edd38900-30d9-4f92-9e58-31002222e3ce/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:16 crc kubenswrapper[5032]: I0122 18:10:16.998690 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-fw9m2_69dc6df4-16ad-4811-af1a-561a56a0e0c6/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:17 crc kubenswrapper[5032]: I0122 18:10:17.306275 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c9dff9df7-cgg9f_31f3bc59-5ab8-4277-a1cf-d496da47a1a1/neutron-httpd/0.log" Jan 22 18:10:17 crc kubenswrapper[5032]: I0122 18:10:17.550585 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-69959c7d95-wj9bh_bfbcce9d-1a0e-4bd5-a6fc-37bfc4e20dd0/keystone-api/0.log" Jan 22 18:10:17 crc kubenswrapper[5032]: I0122 18:10:17.992299 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c9dff9df7-cgg9f_31f3bc59-5ab8-4277-a1cf-d496da47a1a1/neutron-api/0.log" Jan 22 18:10:18 crc kubenswrapper[5032]: I0122 18:10:18.442902 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_cc55079d-e847-4e8b-aa43-55391b8d7a8a/nova-cell0-conductor-conductor/0.log" Jan 22 18:10:18 crc kubenswrapper[5032]: I0122 18:10:18.750757 5032 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_df5d0503-cd88-4501-9466-5e818158b87e/nova-cell1-conductor-conductor/0.log" Jan 22 18:10:19 crc kubenswrapper[5032]: I0122 18:10:19.016678 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_c7489d16-2342-49b9-b936-18f7847fb26e/nova-cell1-novncproxy-novncproxy/0.log" Jan 22 18:10:19 crc kubenswrapper[5032]: I0122 18:10:19.230900 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-84krw_4b0e9abf-570b-428d-8195-a848d6297da4/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:19 crc kubenswrapper[5032]: I0122 18:10:19.589040 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6ed63ebf-4302-41ea-8f32-2536281097d5/nova-api-log/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.024642 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_125c93f8-f508-4bd7-a52e-b98f2c3212d6/nova-metadata-log/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.420934 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6ed63ebf-4302-41ea-8f32-2536281097d5/nova-api-api/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.425980 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_0805eb42-14fe-4cb1-9766-e269515f7d80/nova-scheduler-scheduler/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.512565 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3124207a-b471-4d1b-a15f-ce4191494ba2/mysql-bootstrap/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.672333 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3124207a-b471-4d1b-a15f-ce4191494ba2/mysql-bootstrap/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.769374 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3124207a-b471-4d1b-a15f-ce4191494ba2/galera/0.log" Jan 22 18:10:20 crc kubenswrapper[5032]: I0122 18:10:20.889605 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_45c951d2-5c76-4a36-a53c-47559731095e/mysql-bootstrap/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.168223 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_45c951d2-5c76-4a36-a53c-47559731095e/galera/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.177073 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_45c951d2-5c76-4a36-a53c-47559731095e/mysql-bootstrap/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.337847 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_149da497-8a18-4333-ae25-49d36efc7538/openstackclient/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.379417 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-8rk6c_b60d899c-ce17-4f38-97ea-533ae4e56597/ovn-controller/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.554603 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-t96m6_14f8d6a2-ecc6-468e-82af-b035f56827a3/openstack-network-exporter/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.797759 5032 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-7s9l4_468c8792-5087-4ced-909b-e36976aa235a/ovsdb-server-init/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.944328 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-7s9l4_468c8792-5087-4ced-909b-e36976aa235a/ovsdb-server-init/0.log" Jan 22 18:10:21 crc kubenswrapper[5032]: I0122 18:10:21.953388 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-7s9l4_468c8792-5087-4ced-909b-e36976aa235a/ovs-vswitchd/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.013193 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-7s9l4_468c8792-5087-4ced-909b-e36976aa235a/ovsdb-server/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.217363 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-8kkc2_1641ce01-3281-4f60-a092-48478c3b7fbb/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.465608 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e2012182-a0b0-4312-9235-08bf99c92aa8/ovn-northd/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.666482 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e2012182-a0b0-4312-9235-08bf99c92aa8/openstack-network-exporter/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.910073 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec/openstack-network-exporter/0.log" Jan 22 18:10:22 crc kubenswrapper[5032]: I0122 18:10:22.990470 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_4d3ec4e9-0847-4d54-9d7a-e9455e8e85ec/ovsdbserver-nb/0.log" Jan 22 18:10:23 crc kubenswrapper[5032]: I0122 18:10:23.137440 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_277fb0f4-f6b8-4e75-91dc-9879c7781871/openstack-network-exporter/0.log" Jan 22 18:10:23 crc kubenswrapper[5032]: I0122 18:10:23.180044 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_277fb0f4-f6b8-4e75-91dc-9879c7781871/ovsdbserver-sb/0.log" Jan 22 18:10:23 crc kubenswrapper[5032]: I0122 18:10:23.806471 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c0805a3-4319-469b-8a2a-e53998fd9166/setup-container/0.log" Jan 22 18:10:23 crc kubenswrapper[5032]: I0122 18:10:23.986175 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-697d8b94d8-5r62k_ac44c87a-d256-43f2-9dcb-b7b398ff8b6b/placement-api/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.052916 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c0805a3-4319-469b-8a2a-e53998fd9166/setup-container/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.147647 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-697d8b94d8-5r62k_ac44c87a-d256-43f2-9dcb-b7b398ff8b6b/placement-log/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.185907 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_9c0805a3-4319-469b-8a2a-e53998fd9166/rabbitmq/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.404170 5032 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d91220a8-ad5e-4caf-872f-b4f7e059ae5a/setup-container/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.476493 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_125c93f8-f508-4bd7-a52e-b98f2c3212d6/nova-metadata-metadata/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.550434 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d91220a8-ad5e-4caf-872f-b4f7e059ae5a/setup-container/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.599699 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d91220a8-ad5e-4caf-872f-b4f7e059ae5a/rabbitmq/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.786434 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-5l8q6_befce4e5-991c-438e-ac82-fa7f75091418/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:24 crc kubenswrapper[5032]: I0122 18:10:24.855252 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-22g2w_564e7d8f-ca5b-46fe-9894-e27f6e2b0385/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.082051 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-rwnd7_86b36ce9-5adf-443b-99ec-11d21696562b/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.091653 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-6pcqf_d559e797-49a2-4f7a-827c-d3d2c48563c9/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.402985 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-94c2m_805a1eec-0fc3-4f40-8b05-92d0911b636f/ssh-known-hosts-edpm-deployment/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.590153 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-659fb8687c-zp7xh_7356e680-8b67-44b4-8cff-ea118c682c66/proxy-server/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.613276 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-v5spw_14ae4f20-7067-4eec-b6da-d7e6dce57195/swift-ring-rebalance/0.log" Jan 22 18:10:25 crc kubenswrapper[5032]: I0122 18:10:25.703565 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-659fb8687c-zp7xh_7356e680-8b67-44b4-8cff-ea118c682c66/proxy-httpd/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.077367 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/account-auditor/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.094350 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/account-reaper/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.190652 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/account-replicator/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.284186 5032 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/account-server/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.303449 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/container-auditor/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.408971 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/container-server/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.446069 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/container-replicator/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.535546 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/container-updater/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.646799 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/object-expirer/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.741153 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/object-auditor/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.751000 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/object-replicator/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.790958 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/object-server/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.846173 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/object-updater/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.926848 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/rsync/0.log" Jan 22 18:10:26 crc kubenswrapper[5032]: I0122 18:10:26.987004 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a2f2285d-ead9-458a-a0ef-ed666b86e12f/swift-recon-cron/0.log" Jan 22 18:10:27 crc kubenswrapper[5032]: I0122 18:10:27.172978 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-dc2f4_fc477587-ce40-4773-b6a3-3baefaff6b24/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:27 crc kubenswrapper[5032]: I0122 18:10:27.228144 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-fp87s_d38e2b23-9fdc-47dd-9865-abf873d8bc74/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 22 18:10:28 crc kubenswrapper[5032]: I0122 18:10:28.316673 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8122c1e4-e2c9-4d65-bd43-be04477fd674/memcached/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.468053 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/util/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.642252 5032 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/util/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.652841 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/pull/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.692625 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/pull/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.851950 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/extract/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.873487 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/util/0.log" Jan 22 18:10:50 crc kubenswrapper[5032]: I0122 18:10:50.877646 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b1b2e2a1e823ae2dc23c9c1b13be83bbc37eda94c625a05bbda0d7ca14fntwb_75283565-e2e5-46a8-a168-553c6c9cadc5/pull/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.100469 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-wzh72_7f26e503-3b5f-4a4d-9b23-c8b742fef2a9/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.153543 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-k9425_57b66df0-2faf-4f1a-980c-55094e9abda4/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.291664 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-4gkbq_e558895e-9fa5-46dc-a852-2b876aa9d8f3/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.440398 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-9r28s_e09867e2-02a7-4b8d-a388-78cb4a00b687/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.521794 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-jtrqh_b0a92c7d-3eb6-4d18-a03c-82b380ba29d9/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.600003 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-jtvgl_f7d15cd8-d231-493f-ace5-b52e78dff983/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.823574 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-46xgl_550ebf15-3945-4782-b3b0-c32a34743e65/manager/0.log" Jan 22 18:10:51 crc kubenswrapper[5032]: I0122 18:10:51.976400 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-42r27_59dad380-97a8-4d1a-9dd7-823d8be84b09/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 
18:10:52.067307 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-nqhbr_62453126-30f2-45a5-9b4d-d4844d39107d/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.125972 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-th8gj_d7204606-7e41-46b7-bc8f-ffb0088f839e/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.271123 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-28l4v_f4c46099-26f5-4475-976d-1852dd96b9f8/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.421157 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-8597d6d6d5-2wsj8_8556c821-4b39-4898-8426-197ab92a52ec/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.511272 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-7xr7k_c4713406-4a5e-4bfd-9a06-3c233d2875f4/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.599885 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-4dtdf_c9e7a4d8-5fd5-470c-9c79-bfb55ba57ae5/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.701981 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854h5ds8_f9abaa76-29ed-4f41-9e98-0d6852a98e01/manager/0.log" Jan 22 18:10:52 crc kubenswrapper[5032]: I0122 18:10:52.859591 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-79b4b78677-pjcqc_81045473-a36f-4725-8a9c-d7de6defd405/operator/0.log" Jan 22 18:10:53 crc kubenswrapper[5032]: I0122 18:10:53.127439 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-pdtz7_19b316b1-1229-4a95-9d08-4a426582d827/registry-server/0.log" Jan 22 18:10:53 crc kubenswrapper[5032]: I0122 18:10:53.272683 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-ljq9b_a4c62d00-dba9-4309-9588-e5cb36dd675f/manager/0.log" Jan 22 18:10:53 crc kubenswrapper[5032]: I0122 18:10:53.472770 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-5cxj6_9d75008c-d650-41f9-9c0f-beceefae917f/manager/0.log" Jan 22 18:10:53 crc kubenswrapper[5032]: I0122 18:10:53.707564 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-46dr5_fc432eb8-8449-4f05-9441-18726488a595/operator/0.log" Jan 22 18:10:53 crc kubenswrapper[5032]: I0122 18:10:53.779510 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-cqbtr_e7b1c714-1088-419e-9e6b-d553a73583a5/manager/0.log" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.063147 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-x8dvh_ff38f03d-52c0-42dc-85e9-9a67709d0d74/manager/0.log" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.253819 5032 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-w27st_4f11f845-b9f4-4720-bddd-e5c56ef8a893/manager/0.log" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.345216 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:10:54 crc kubenswrapper[5032]: E0122 18:10:54.365203 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a360698-16bf-468f-9bfd-51cdb4c9fb72" containerName="container-00" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.365245 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a360698-16bf-468f-9bfd-51cdb4c9fb72" containerName="container-00" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.365625 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a360698-16bf-468f-9bfd-51cdb4c9fb72" containerName="container-00" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.367507 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.375256 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.406291 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f64d4d54c-p884k_ca22f41b-0f0c-4a2d-b99c-271fa3e4cefb/manager/0.log" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.433593 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-g98vr_739bb968-5cd4-460a-a812-dba945e8a695/manager/0.log" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.508889 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndnrg\" (UniqueName: \"kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.509067 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.509253 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.610735 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.610871 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.610957 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndnrg\" (UniqueName: \"kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.611754 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.612059 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.633327 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndnrg\" (UniqueName: \"kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg\") pod \"redhat-operators-qqdd9\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:54 crc kubenswrapper[5032]: I0122 18:10:54.706723 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:10:55 crc kubenswrapper[5032]: I0122 18:10:55.211477 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:10:55 crc kubenswrapper[5032]: I0122 18:10:55.963674 5032 generic.go:334] "Generic (PLEG): container finished" podID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerID="0f25fae7d70f71721e87b9c35d55d95972fd6a028537eaf19770cc34cba38316" exitCode=0 Jan 22 18:10:55 crc kubenswrapper[5032]: I0122 18:10:55.963832 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerDied","Data":"0f25fae7d70f71721e87b9c35d55d95972fd6a028537eaf19770cc34cba38316"} Jan 22 18:10:55 crc kubenswrapper[5032]: I0122 18:10:55.964286 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerStarted","Data":"0823b16654f90983b3470ae5d5c644236f88b5e7ceb10f3645d12fc6757555f8"} Jan 22 18:10:56 crc kubenswrapper[5032]: I0122 18:10:56.976237 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerStarted","Data":"51ee817a777e9eaaa98d482e868dba6caa0bbe11a4e451481ef9309a64fc5f90"} Jan 22 18:11:02 crc kubenswrapper[5032]: I0122 18:11:02.021779 5032 generic.go:334] "Generic (PLEG): container finished" podID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerID="51ee817a777e9eaaa98d482e868dba6caa0bbe11a4e451481ef9309a64fc5f90" exitCode=0 Jan 22 18:11:02 crc kubenswrapper[5032]: I0122 18:11:02.021812 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerDied","Data":"51ee817a777e9eaaa98d482e868dba6caa0bbe11a4e451481ef9309a64fc5f90"} Jan 22 18:11:03 crc kubenswrapper[5032]: I0122 18:11:03.036910 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerStarted","Data":"70c32896f977a0918bdbc47dea4cb0df495e421cd9ac451b83cf3d25d91090f5"} Jan 22 18:11:03 crc kubenswrapper[5032]: I0122 18:11:03.061909 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qqdd9" podStartSLOduration=2.562757126 podStartE2EDuration="9.061889446s" podCreationTimestamp="2026-01-22 18:10:54 +0000 UTC" firstStartedPulling="2026-01-22 18:10:55.966203297 +0000 UTC m=+7559.778372328" lastFinishedPulling="2026-01-22 18:11:02.465335607 +0000 UTC m=+7566.277504648" observedRunningTime="2026-01-22 18:11:03.055220063 +0000 UTC m=+7566.867389094" watchObservedRunningTime="2026-01-22 18:11:03.061889446 +0000 UTC m=+7566.874058497" Jan 22 18:11:04 crc kubenswrapper[5032]: I0122 18:11:04.708904 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:04 crc kubenswrapper[5032]: I0122 18:11:04.709540 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:05 crc kubenswrapper[5032]: I0122 18:11:05.757156 5032 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qqdd9" 
podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="registry-server" probeResult="failure" output=< Jan 22 18:11:05 crc kubenswrapper[5032]: timeout: failed to connect service ":50051" within 1s Jan 22 18:11:05 crc kubenswrapper[5032]: > Jan 22 18:11:13 crc kubenswrapper[5032]: I0122 18:11:13.715248 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-qrkp4_e1d0b498-9c35-45b1-aaa2-45ba04eb1e1a/control-plane-machine-set-operator/0.log" Jan 22 18:11:13 crc kubenswrapper[5032]: I0122 18:11:13.865404 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-fthxg_dfe97498-e7f6-4e19-ad51-65c34957b415/kube-rbac-proxy/0.log" Jan 22 18:11:13 crc kubenswrapper[5032]: I0122 18:11:13.889369 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-fthxg_dfe97498-e7f6-4e19-ad51-65c34957b415/machine-api-operator/0.log" Jan 22 18:11:14 crc kubenswrapper[5032]: I0122 18:11:14.763169 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:14 crc kubenswrapper[5032]: I0122 18:11:14.820815 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.396200 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.398252 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.403074 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.403143 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.403309 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfhpq\" (UniqueName: \"kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.411938 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.505082 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " 
pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.505299 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfhpq\" (UniqueName: \"kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.505357 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.505659 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.505750 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.532439 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfhpq\" (UniqueName: \"kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq\") pod \"redhat-marketplace-wzjsb\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:15 crc kubenswrapper[5032]: I0122 18:11:15.724673 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:16 crc kubenswrapper[5032]: I0122 18:11:16.278411 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:17 crc kubenswrapper[5032]: I0122 18:11:17.161189 5032 generic.go:334] "Generic (PLEG): container finished" podID="913af4f9-b225-4476-9391-aa2ada7420a0" containerID="c5ac4d2aca09527e6bce84034c6da7560b0f38e507f0d009c32e608361fa57d9" exitCode=0 Jan 22 18:11:17 crc kubenswrapper[5032]: I0122 18:11:17.161367 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerDied","Data":"c5ac4d2aca09527e6bce84034c6da7560b0f38e507f0d009c32e608361fa57d9"} Jan 22 18:11:17 crc kubenswrapper[5032]: I0122 18:11:17.161504 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerStarted","Data":"f3ea129f25b118cfebe724afd67d2adce76f9026ffc9746c1183b20dfffddf31"} Jan 22 18:11:17 crc kubenswrapper[5032]: I0122 18:11:17.988631 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:11:17 crc kubenswrapper[5032]: I0122 18:11:17.989313 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qqdd9" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="registry-server" containerID="cri-o://70c32896f977a0918bdbc47dea4cb0df495e421cd9ac451b83cf3d25d91090f5" gracePeriod=2 Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.224664 5032 generic.go:334] "Generic (PLEG): container finished" podID="913af4f9-b225-4476-9391-aa2ada7420a0" containerID="cbd322fffe91605c34a3c98818358c1899cd5d4189831757ce8bfd16fb08dde4" exitCode=0 Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.225073 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerDied","Data":"cbd322fffe91605c34a3c98818358c1899cd5d4189831757ce8bfd16fb08dde4"} Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.230790 5032 generic.go:334] "Generic (PLEG): container finished" podID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerID="70c32896f977a0918bdbc47dea4cb0df495e421cd9ac451b83cf3d25d91090f5" exitCode=0 Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.230837 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerDied","Data":"70c32896f977a0918bdbc47dea4cb0df495e421cd9ac451b83cf3d25d91090f5"} Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.540502 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.662179 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content\") pod \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.662588 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities\") pod \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.662619 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndnrg\" (UniqueName: \"kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg\") pod \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\" (UID: \"90bbcbb9-9291-47d1-adf5-4209c270d3a0\") " Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.663399 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities" (OuterVolumeSpecName: "utilities") pod "90bbcbb9-9291-47d1-adf5-4209c270d3a0" (UID: "90bbcbb9-9291-47d1-adf5-4209c270d3a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.667703 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg" (OuterVolumeSpecName: "kube-api-access-ndnrg") pod "90bbcbb9-9291-47d1-adf5-4209c270d3a0" (UID: "90bbcbb9-9291-47d1-adf5-4209c270d3a0"). InnerVolumeSpecName "kube-api-access-ndnrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.765243 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.766613 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndnrg\" (UniqueName: \"kubernetes.io/projected/90bbcbb9-9291-47d1-adf5-4209c270d3a0-kube-api-access-ndnrg\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.778603 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90bbcbb9-9291-47d1-adf5-4209c270d3a0" (UID: "90bbcbb9-9291-47d1-adf5-4209c270d3a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:11:18 crc kubenswrapper[5032]: I0122 18:11:18.868991 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bbcbb9-9291-47d1-adf5-4209c270d3a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.243180 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerStarted","Data":"86b514f4ff988e0938765ac230c4a00a2500f8c81b00f8f950d13e1cf12412fc"} Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.246131 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qqdd9" event={"ID":"90bbcbb9-9291-47d1-adf5-4209c270d3a0","Type":"ContainerDied","Data":"0823b16654f90983b3470ae5d5c644236f88b5e7ceb10f3645d12fc6757555f8"} Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.246207 5032 scope.go:117] "RemoveContainer" containerID="70c32896f977a0918bdbc47dea4cb0df495e421cd9ac451b83cf3d25d91090f5" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.246380 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qqdd9" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.267403 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wzjsb" podStartSLOduration=2.780113449 podStartE2EDuration="4.267380822s" podCreationTimestamp="2026-01-22 18:11:15 +0000 UTC" firstStartedPulling="2026-01-22 18:11:17.163004953 +0000 UTC m=+7580.975173994" lastFinishedPulling="2026-01-22 18:11:18.650272336 +0000 UTC m=+7582.462441367" observedRunningTime="2026-01-22 18:11:19.26653154 +0000 UTC m=+7583.078700581" watchObservedRunningTime="2026-01-22 18:11:19.267380822 +0000 UTC m=+7583.079549853" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.280126 5032 scope.go:117] "RemoveContainer" containerID="51ee817a777e9eaaa98d482e868dba6caa0bbe11a4e451481ef9309a64fc5f90" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.294034 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.312498 5032 scope.go:117] "RemoveContainer" containerID="0f25fae7d70f71721e87b9c35d55d95972fd6a028537eaf19770cc34cba38316" Jan 22 18:11:19 crc kubenswrapper[5032]: I0122 18:11:19.321515 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qqdd9"] Jan 22 18:11:20 crc kubenswrapper[5032]: I0122 18:11:20.467623 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" path="/var/lib/kubelet/pods/90bbcbb9-9291-47d1-adf5-4209c270d3a0/volumes" Jan 22 18:11:25 crc kubenswrapper[5032]: I0122 18:11:25.725042 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:25 crc kubenswrapper[5032]: I0122 18:11:25.726526 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:25 crc kubenswrapper[5032]: I0122 18:11:25.781992 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:26 crc kubenswrapper[5032]: I0122 18:11:26.352380 
5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:26 crc kubenswrapper[5032]: I0122 18:11:26.404603 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:28 crc kubenswrapper[5032]: I0122 18:11:28.325741 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wzjsb" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="registry-server" containerID="cri-o://86b514f4ff988e0938765ac230c4a00a2500f8c81b00f8f950d13e1cf12412fc" gracePeriod=2 Jan 22 18:11:28 crc kubenswrapper[5032]: I0122 18:11:28.442260 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-96lql_147a9188-17a7-4cf4-91d1-ca1f033f2da6/cert-manager-controller/0.log" Jan 22 18:11:28 crc kubenswrapper[5032]: I0122 18:11:28.614079 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-ccw7p_631e824a-b3b5-4876-aefd-cda7db40252a/cert-manager-cainjector/0.log" Jan 22 18:11:28 crc kubenswrapper[5032]: I0122 18:11:28.836749 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-skzfx_6a0ca471-23dc-4eac-9c3a-0adfc1c0a003/cert-manager-webhook/0.log" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.336456 5032 generic.go:334] "Generic (PLEG): container finished" podID="913af4f9-b225-4476-9391-aa2ada7420a0" containerID="86b514f4ff988e0938765ac230c4a00a2500f8c81b00f8f950d13e1cf12412fc" exitCode=0 Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.336510 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerDied","Data":"86b514f4ff988e0938765ac230c4a00a2500f8c81b00f8f950d13e1cf12412fc"} Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.428850 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.597168 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities\") pod \"913af4f9-b225-4476-9391-aa2ada7420a0\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.597341 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content\") pod \"913af4f9-b225-4476-9391-aa2ada7420a0\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.597501 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfhpq\" (UniqueName: \"kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq\") pod \"913af4f9-b225-4476-9391-aa2ada7420a0\" (UID: \"913af4f9-b225-4476-9391-aa2ada7420a0\") " Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.598274 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities" (OuterVolumeSpecName: "utilities") pod "913af4f9-b225-4476-9391-aa2ada7420a0" (UID: "913af4f9-b225-4476-9391-aa2ada7420a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.607232 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq" (OuterVolumeSpecName: "kube-api-access-jfhpq") pod "913af4f9-b225-4476-9391-aa2ada7420a0" (UID: "913af4f9-b225-4476-9391-aa2ada7420a0"). InnerVolumeSpecName "kube-api-access-jfhpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.626776 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "913af4f9-b225-4476-9391-aa2ada7420a0" (UID: "913af4f9-b225-4476-9391-aa2ada7420a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.700130 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfhpq\" (UniqueName: \"kubernetes.io/projected/913af4f9-b225-4476-9391-aa2ada7420a0-kube-api-access-jfhpq\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.700176 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:29 crc kubenswrapper[5032]: I0122 18:11:29.700188 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/913af4f9-b225-4476-9391-aa2ada7420a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.348154 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzjsb" event={"ID":"913af4f9-b225-4476-9391-aa2ada7420a0","Type":"ContainerDied","Data":"f3ea129f25b118cfebe724afd67d2adce76f9026ffc9746c1183b20dfffddf31"} Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.348244 5032 scope.go:117] "RemoveContainer" containerID="86b514f4ff988e0938765ac230c4a00a2500f8c81b00f8f950d13e1cf12412fc" Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.348274 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzjsb" Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.374492 5032 scope.go:117] "RemoveContainer" containerID="cbd322fffe91605c34a3c98818358c1899cd5d4189831757ce8bfd16fb08dde4" Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.390785 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.408370 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzjsb"] Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.409110 5032 scope.go:117] "RemoveContainer" containerID="c5ac4d2aca09527e6bce84034c6da7560b0f38e507f0d009c32e608361fa57d9" Jan 22 18:11:30 crc kubenswrapper[5032]: I0122 18:11:30.468703 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" path="/var/lib/kubelet/pods/913af4f9-b225-4476-9391-aa2ada7420a0/volumes" Jan 22 18:11:33 crc kubenswrapper[5032]: I0122 18:11:33.016690 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:11:33 crc kubenswrapper[5032]: I0122 18:11:33.017078 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:11:40 crc kubenswrapper[5032]: I0122 18:11:40.673217 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-gl429_d647a337-b4fc-4c83-8bc5-1efc10b5eda5/nmstate-console-plugin/0.log" Jan 22 18:11:40 crc kubenswrapper[5032]: 
I0122 18:11:40.841467 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-znmml_f431be2d-ceb6-4b82-a770-6827a13d1bb1/nmstate-handler/0.log" Jan 22 18:11:40 crc kubenswrapper[5032]: I0122 18:11:40.901635 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-l4hfh_d0470720-9420-4c28-9b86-07d2fabd98eb/kube-rbac-proxy/0.log" Jan 22 18:11:40 crc kubenswrapper[5032]: I0122 18:11:40.992722 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-l4hfh_d0470720-9420-4c28-9b86-07d2fabd98eb/nmstate-metrics/0.log" Jan 22 18:11:41 crc kubenswrapper[5032]: I0122 18:11:41.075014 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-x2pwd_2b63c1dc-4cef-491f-b146-cdad3ca66d18/nmstate-operator/0.log" Jan 22 18:11:41 crc kubenswrapper[5032]: I0122 18:11:41.168772 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-rv2xr_0b20496f-a661-4546-b7fc-dd3be2b1914c/nmstate-webhook/0.log" Jan 22 18:12:03 crc kubenswrapper[5032]: I0122 18:12:03.018074 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:12:03 crc kubenswrapper[5032]: I0122 18:12:03.018532 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.247241 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-pskfs_86cb2f0d-5cf4-438d-9bd2-90e26f223d16/controller/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.265771 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-pskfs_86cb2f0d-5cf4-438d-9bd2-90e26f223d16/kube-rbac-proxy/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.405288 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-frr-files/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.602172 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-metrics/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.617829 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-reloader/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.634559 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-frr-files/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.655566 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-reloader/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.802938 5032 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-reloader/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.834837 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-metrics/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.838785 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-metrics/0.log" Jan 22 18:12:07 crc kubenswrapper[5032]: I0122 18:12:07.847175 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-frr-files/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.009512 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-frr-files/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.018630 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-metrics/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.019191 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/cp-reloader/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.046098 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/controller/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.177085 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/frr-metrics/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.201737 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/kube-rbac-proxy/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.229434 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/kube-rbac-proxy-frr/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.393736 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/reloader/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.486088 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9ms7l_42fa9b50-fc10-4e57-a693-9b3d41a01ebe/frr-k8s-webhook-server/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.643618 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-79794f8d77-hg8k8_2e6a0f38-95b2-48f5-80f9-612c5508912d/manager/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.847678 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c755f5864-r5hgn_8dc54e54-b180-487b-b1d9-270516457d4f/webhook-server/0.log" Jan 22 18:12:08 crc kubenswrapper[5032]: I0122 18:12:08.907316 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-nhgx7_8c978f67-ff6a-436c-b801-c7d88a0550e7/kube-rbac-proxy/0.log" Jan 22 18:12:09 crc kubenswrapper[5032]: I0122 18:12:09.558472 5032 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-nhgx7_8c978f67-ff6a-436c-b801-c7d88a0550e7/speaker/0.log" Jan 22 18:12:10 crc kubenswrapper[5032]: I0122 18:12:10.147106 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bkbn9_e4695925-6a14-469e-848d-4f4c05601c65/frr/0.log" Jan 22 18:12:20 crc kubenswrapper[5032]: I0122 18:12:20.933556 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.113502 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.163211 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.220506 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.416487 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/extract/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.427297 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.457074 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcr7kfg_7b3f8a94-b262-4f1b-b42a-5cea8fad124b/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.598047 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.775957 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.777783 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.787077 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.932125 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/pull/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.983191 5032 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/util/0.log" Jan 22 18:12:21 crc kubenswrapper[5032]: I0122 18:12:21.987018 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713zlchw_bb030491-c854-459b-9688-96a54419708f/extract/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.107417 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-utilities/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.313590 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-content/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.323207 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-content/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.336249 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-utilities/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.508349 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-utilities/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.567095 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/extract-content/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.724381 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-utilities/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.942041 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-utilities/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.949265 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-content/0.log" Jan 22 18:12:22 crc kubenswrapper[5032]: I0122 18:12:22.990218 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n29hj_a66a029a-23a5-4b4c-89b4-bfa344805527/registry-server/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.029678 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-content/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.130498 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-utilities/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.174744 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/extract-content/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 
18:12:23.398637 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qfphp_611edd22-6336-4d33-9dbd-fcceb3246fb0/marketplace-operator/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.524162 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-utilities/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.643817 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l4v2_75419f93-5572-44b2-a1b3-ddc47833b19d/registry-server/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.727755 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-utilities/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.767632 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-content/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.772551 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-content/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.933037 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-utilities/0.log" Jan 22 18:12:23 crc kubenswrapper[5032]: I0122 18:12:23.974780 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/extract-content/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.174404 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-utilities/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.227647 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-pbjpb_1daf631c-f05c-4a21-acf1-1bf108a35cc9/registry-server/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.380653 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-content/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.381492 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-utilities/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.396054 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-content/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.553366 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-utilities/0.log" Jan 22 18:12:24 crc kubenswrapper[5032]: I0122 18:12:24.563373 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/extract-content/0.log" Jan 22 18:12:25 crc kubenswrapper[5032]: I0122 18:12:25.310646 5032 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nfp7l_32f503dc-221c-4920-a207-f3651dd990df/registry-server/0.log" Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.016475 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.017251 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.017319 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.018370 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.018466 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5" gracePeriod=600 Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.885968 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5" exitCode=0 Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.886060 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5"} Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.886552 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerStarted","Data":"5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef"} Jan 22 18:12:33 crc kubenswrapper[5032]: I0122 18:12:33.886576 5032 scope.go:117] "RemoveContainer" containerID="7603221d4bdfa90598d7f6175b654c518a2c71969b1ee099652d81cdc8ef8706" Jan 22 18:14:33 crc kubenswrapper[5032]: I0122 18:14:33.016740 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:14:33 crc kubenswrapper[5032]: I0122 18:14:33.017336 5032 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:14:35 crc kubenswrapper[5032]: I0122 18:14:35.996303 5032 generic.go:334] "Generic (PLEG): container finished" podID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerID="3fbf036c96315838f89b57355b8c4f43f61074ce46654c5ced72bf5b507fb07a" exitCode=0 Jan 22 18:14:35 crc kubenswrapper[5032]: I0122 18:14:35.996393 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rcjg4/must-gather-vwqph" event={"ID":"2cad7eb8-5e19-45f7-850a-a931d5d3412b","Type":"ContainerDied","Data":"3fbf036c96315838f89b57355b8c4f43f61074ce46654c5ced72bf5b507fb07a"} Jan 22 18:14:35 crc kubenswrapper[5032]: I0122 18:14:35.998184 5032 scope.go:117] "RemoveContainer" containerID="3fbf036c96315838f89b57355b8c4f43f61074ce46654c5ced72bf5b507fb07a" Jan 22 18:14:36 crc kubenswrapper[5032]: I0122 18:14:36.161632 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rcjg4_must-gather-vwqph_2cad7eb8-5e19-45f7-850a-a931d5d3412b/gather/0.log" Jan 22 18:14:44 crc kubenswrapper[5032]: I0122 18:14:44.604252 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rcjg4/must-gather-vwqph"] Jan 22 18:14:44 crc kubenswrapper[5032]: I0122 18:14:44.605116 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-rcjg4/must-gather-vwqph" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="copy" containerID="cri-o://b86e35f2060dc28f17c26d20cb80c892ab419b996f2e7474def032cc742b9ad5" gracePeriod=2 Jan 22 18:14:44 crc kubenswrapper[5032]: I0122 18:14:44.614395 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rcjg4/must-gather-vwqph"] Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.075821 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rcjg4_must-gather-vwqph_2cad7eb8-5e19-45f7-850a-a931d5d3412b/copy/0.log" Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.076330 5032 generic.go:334] "Generic (PLEG): container finished" podID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerID="b86e35f2060dc28f17c26d20cb80c892ab419b996f2e7474def032cc742b9ad5" exitCode=143 Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.728545 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rcjg4_must-gather-vwqph_2cad7eb8-5e19-45f7-850a-a931d5d3412b/copy/0.log" Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.730252 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.804413 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d9g5\" (UniqueName: \"kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5\") pod \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.804596 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output\") pod \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\" (UID: \"2cad7eb8-5e19-45f7-850a-a931d5d3412b\") " Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.813378 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5" (OuterVolumeSpecName: "kube-api-access-2d9g5") pod "2cad7eb8-5e19-45f7-850a-a931d5d3412b" (UID: "2cad7eb8-5e19-45f7-850a-a931d5d3412b"). InnerVolumeSpecName "kube-api-access-2d9g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:14:45 crc kubenswrapper[5032]: I0122 18:14:45.906503 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d9g5\" (UniqueName: \"kubernetes.io/projected/2cad7eb8-5e19-45f7-850a-a931d5d3412b-kube-api-access-2d9g5\") on node \"crc\" DevicePath \"\"" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.083115 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "2cad7eb8-5e19-45f7-850a-a931d5d3412b" (UID: "2cad7eb8-5e19-45f7-850a-a931d5d3412b"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.102276 5032 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rcjg4_must-gather-vwqph_2cad7eb8-5e19-45f7-850a-a931d5d3412b/copy/0.log" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.103229 5032 scope.go:117] "RemoveContainer" containerID="b86e35f2060dc28f17c26d20cb80c892ab419b996f2e7474def032cc742b9ad5" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.103448 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rcjg4/must-gather-vwqph" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.110824 5032 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2cad7eb8-5e19-45f7-850a-a931d5d3412b-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.135972 5032 scope.go:117] "RemoveContainer" containerID="3fbf036c96315838f89b57355b8c4f43f61074ce46654c5ced72bf5b507fb07a" Jan 22 18:14:46 crc kubenswrapper[5032]: I0122 18:14:46.466919 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" path="/var/lib/kubelet/pods/2cad7eb8-5e19-45f7-850a-a931d5d3412b/volumes" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.191037 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q"] Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192150 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192170 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192195 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="gather" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192204 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="gather" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192218 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="copy" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192226 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="copy" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192260 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="extract-content" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192268 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="extract-content" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192281 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="extract-content" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192288 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="extract-content" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192314 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="extract-utilities" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192322 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="extract-utilities" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192338 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="extract-utilities" Jan 22 18:15:00 crc 
kubenswrapper[5032]: I0122 18:15:00.192345 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="extract-utilities" Jan 22 18:15:00 crc kubenswrapper[5032]: E0122 18:15:00.192355 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192364 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192609 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="copy" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192630 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="90bbcbb9-9291-47d1-adf5-4209c270d3a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192642 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="913af4f9-b225-4476-9391-aa2ada7420a0" containerName="registry-server" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.192658 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cad7eb8-5e19-45f7-850a-a931d5d3412b" containerName="gather" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.193520 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.197101 5032 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.197513 5032 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.207405 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q"] Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.289360 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.289789 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.289975 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghb2j\" (UniqueName: \"kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 
18:15:00.392776 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.392944 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghb2j\" (UniqueName: \"kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.393081 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.394616 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.407478 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.409683 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghb2j\" (UniqueName: \"kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j\") pod \"collect-profiles-29485095-72c5q\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:00 crc kubenswrapper[5032]: I0122 18:15:00.521651 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:01 crc kubenswrapper[5032]: I0122 18:15:01.005758 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q"] Jan 22 18:15:01 crc kubenswrapper[5032]: I0122 18:15:01.255851 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" event={"ID":"a2ec7a85-379e-44b1-99ea-843c9dbaf905","Type":"ContainerStarted","Data":"61e84e53f289e185faf93a23b8e73655c023752b2a4e6619e5dfad3cb02215e1"} Jan 22 18:15:01 crc kubenswrapper[5032]: I0122 18:15:01.255918 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" event={"ID":"a2ec7a85-379e-44b1-99ea-843c9dbaf905","Type":"ContainerStarted","Data":"a6a48cb960ff28da4c0760120c24521bdf8ac09625ae5326a94805f2ca9930d2"} Jan 22 18:15:01 crc kubenswrapper[5032]: I0122 18:15:01.277160 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" podStartSLOduration=1.27713996 podStartE2EDuration="1.27713996s" podCreationTimestamp="2026-01-22 18:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 18:15:01.269372638 +0000 UTC m=+7805.081541689" watchObservedRunningTime="2026-01-22 18:15:01.27713996 +0000 UTC m=+7805.089308991" Jan 22 18:15:02 crc kubenswrapper[5032]: I0122 18:15:02.265918 5032 generic.go:334] "Generic (PLEG): container finished" podID="a2ec7a85-379e-44b1-99ea-843c9dbaf905" containerID="61e84e53f289e185faf93a23b8e73655c023752b2a4e6619e5dfad3cb02215e1" exitCode=0 Jan 22 18:15:02 crc kubenswrapper[5032]: I0122 18:15:02.266022 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" event={"ID":"a2ec7a85-379e-44b1-99ea-843c9dbaf905","Type":"ContainerDied","Data":"61e84e53f289e185faf93a23b8e73655c023752b2a4e6619e5dfad3cb02215e1"} Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.016938 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.017321 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.608021 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.659808 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume\") pod \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.659990 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume\") pod \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.660050 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghb2j\" (UniqueName: \"kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j\") pod \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\" (UID: \"a2ec7a85-379e-44b1-99ea-843c9dbaf905\") " Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.660455 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume" (OuterVolumeSpecName: "config-volume") pod "a2ec7a85-379e-44b1-99ea-843c9dbaf905" (UID: "a2ec7a85-379e-44b1-99ea-843c9dbaf905"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.660609 5032 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2ec7a85-379e-44b1-99ea-843c9dbaf905-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.666529 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a2ec7a85-379e-44b1-99ea-843c9dbaf905" (UID: "a2ec7a85-379e-44b1-99ea-843c9dbaf905"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.666987 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j" (OuterVolumeSpecName: "kube-api-access-ghb2j") pod "a2ec7a85-379e-44b1-99ea-843c9dbaf905" (UID: "a2ec7a85-379e-44b1-99ea-843c9dbaf905"). InnerVolumeSpecName "kube-api-access-ghb2j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.762285 5032 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2ec7a85-379e-44b1-99ea-843c9dbaf905-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 18:15:03 crc kubenswrapper[5032]: I0122 18:15:03.762329 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghb2j\" (UniqueName: \"kubernetes.io/projected/a2ec7a85-379e-44b1-99ea-843c9dbaf905-kube-api-access-ghb2j\") on node \"crc\" DevicePath \"\"" Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.283178 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" event={"ID":"a2ec7a85-379e-44b1-99ea-843c9dbaf905","Type":"ContainerDied","Data":"a6a48cb960ff28da4c0760120c24521bdf8ac09625ae5326a94805f2ca9930d2"} Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.283232 5032 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6a48cb960ff28da4c0760120c24521bdf8ac09625ae5326a94805f2ca9930d2" Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.283711 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485095-72c5q" Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.335897 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f"] Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.344354 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485050-j6v5f"] Jan 22 18:15:04 crc kubenswrapper[5032]: I0122 18:15:04.465674 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd0dc812-d638-48d0-96f3-ecc27b10dcac" path="/var/lib/kubelet/pods/cd0dc812-d638-48d0-96f3-ecc27b10dcac/volumes" Jan 22 18:15:27 crc kubenswrapper[5032]: I0122 18:15:27.973894 5032 scope.go:117] "RemoveContainer" containerID="aefa8ee0765983450b208bd7e7884077ff3b206471168c74e7681a4bc835a18e" Jan 22 18:15:28 crc kubenswrapper[5032]: I0122 18:15:28.005517 5032 scope.go:117] "RemoveContainer" containerID="b975138e99b58f16f3694a59b01d4672f6665db5986e9711de548eb8ba8c9a7e" Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.017218 5032 patch_prober.go:28] interesting pod/machine-config-daemon-6qsfs container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.018454 5032 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.018542 5032 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.019775 5032 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef"} pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.019847 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerName="machine-config-daemon" containerID="cri-o://5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" gracePeriod=600 Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.538149 5032 generic.go:334] "Generic (PLEG): container finished" podID="fee2227c-a587-4ec3-909d-bd355aa116d4" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" exitCode=0 Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.538244 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" event={"ID":"fee2227c-a587-4ec3-909d-bd355aa116d4","Type":"ContainerDied","Data":"5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef"} Jan 22 18:15:33 crc kubenswrapper[5032]: I0122 18:15:33.538526 5032 scope.go:117] "RemoveContainer" containerID="d846a5f5f4cf9db4f36e12537e853c4be8c65828e889a120583571d42c5162c5" Jan 22 18:15:33 crc kubenswrapper[5032]: E0122 18:15:33.816166 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:15:34 crc kubenswrapper[5032]: I0122 18:15:34.549200 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:15:34 crc kubenswrapper[5032]: E0122 18:15:34.549522 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:15:49 crc kubenswrapper[5032]: I0122 18:15:49.455419 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:15:49 crc kubenswrapper[5032]: E0122 18:15:49.456227 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:16:02 crc kubenswrapper[5032]: I0122 18:16:02.455536 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:16:02 crc kubenswrapper[5032]: E0122 18:16:02.456452 5032 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:16:16 crc kubenswrapper[5032]: I0122 18:16:16.461437 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:16:16 crc kubenswrapper[5032]: E0122 18:16:16.462318 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:16:28 crc kubenswrapper[5032]: I0122 18:16:28.112289 5032 scope.go:117] "RemoveContainer" containerID="f57812551338c4124916e1cf506d284d7fd3c60e1a389557475e6917c70702d2" Jan 22 18:16:31 crc kubenswrapper[5032]: I0122 18:16:31.456100 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:16:31 crc kubenswrapper[5032]: E0122 18:16:31.464121 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:16:42 crc kubenswrapper[5032]: I0122 18:16:42.454400 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:16:42 crc kubenswrapper[5032]: E0122 18:16:42.455531 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:16:57 crc kubenswrapper[5032]: I0122 18:16:57.454913 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:16:57 crc kubenswrapper[5032]: E0122 18:16:57.455587 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:17:08 crc kubenswrapper[5032]: I0122 18:17:08.455015 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:17:08 crc kubenswrapper[5032]: E0122 18:17:08.456537 5032 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:17:19 crc kubenswrapper[5032]: I0122 18:17:19.455094 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:17:19 crc kubenswrapper[5032]: E0122 18:17:19.456204 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:17:32 crc kubenswrapper[5032]: I0122 18:17:32.454638 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:17:32 crc kubenswrapper[5032]: E0122 18:17:32.455293 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:17:43 crc kubenswrapper[5032]: I0122 18:17:43.454331 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:17:43 crc kubenswrapper[5032]: E0122 18:17:43.455071 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:17:54 crc kubenswrapper[5032]: I0122 18:17:54.454560 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:17:54 crc kubenswrapper[5032]: E0122 18:17:54.455364 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:08 crc kubenswrapper[5032]: I0122 18:18:08.455047 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:18:08 crc kubenswrapper[5032]: E0122 18:18:08.457943 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:19 crc kubenswrapper[5032]: I0122 18:18:19.455779 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:18:19 crc kubenswrapper[5032]: E0122 18:18:19.456771 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:33 crc kubenswrapper[5032]: I0122 18:18:33.455346 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:18:33 crc kubenswrapper[5032]: E0122 18:18:33.456130 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:46 crc kubenswrapper[5032]: I0122 18:18:46.460953 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:18:46 crc kubenswrapper[5032]: E0122 18:18:46.461994 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.765339 5032 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:18:48 crc kubenswrapper[5032]: E0122 18:18:48.766206 5032 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ec7a85-379e-44b1-99ea-843c9dbaf905" containerName="collect-profiles" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.766226 5032 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ec7a85-379e-44b1-99ea-843c9dbaf905" containerName="collect-profiles" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.766517 5032 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2ec7a85-379e-44b1-99ea-843c9dbaf905" containerName="collect-profiles" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.768603 5032 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.784726 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.896634 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.896685 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.896724 5032 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg9r7\" (UniqueName: \"kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.998722 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.998783 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.998820 5032 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg9r7\" (UniqueName: \"kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.999236 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:48 crc kubenswrapper[5032]: I0122 18:18:48.999354 5032 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:49 crc kubenswrapper[5032]: I0122 18:18:49.023888 5032 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qg9r7\" (UniqueName: \"kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7\") pod \"certified-operators-nvvqz\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:49 crc kubenswrapper[5032]: I0122 18:18:49.090106 5032 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:49 crc kubenswrapper[5032]: I0122 18:18:49.597962 5032 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:18:50 crc kubenswrapper[5032]: I0122 18:18:50.376102 5032 generic.go:334] "Generic (PLEG): container finished" podID="fd3bf405-f7ca-41b2-b728-39b59e03f344" containerID="0e0077565a69e87487935953d0832b0ad4b66ea123cdbb4508e2111aa36a45b3" exitCode=0 Jan 22 18:18:50 crc kubenswrapper[5032]: I0122 18:18:50.376194 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerDied","Data":"0e0077565a69e87487935953d0832b0ad4b66ea123cdbb4508e2111aa36a45b3"} Jan 22 18:18:50 crc kubenswrapper[5032]: I0122 18:18:50.376686 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerStarted","Data":"2285d485dbcd0706268fe008403801eb0fdcf23dc10d712db3cc5edf91a4bed5"} Jan 22 18:18:50 crc kubenswrapper[5032]: I0122 18:18:50.379305 5032 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 18:18:52 crc kubenswrapper[5032]: I0122 18:18:52.395884 5032 generic.go:334] "Generic (PLEG): container finished" podID="fd3bf405-f7ca-41b2-b728-39b59e03f344" containerID="ee2c2ea53aed5a1edd2ca6b5ba176ef64a81c8fdb020cf48f85fe6fb9345bd29" exitCode=0 Jan 22 18:18:52 crc kubenswrapper[5032]: I0122 18:18:52.395897 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerDied","Data":"ee2c2ea53aed5a1edd2ca6b5ba176ef64a81c8fdb020cf48f85fe6fb9345bd29"} Jan 22 18:18:53 crc kubenswrapper[5032]: I0122 18:18:53.424782 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerStarted","Data":"b5963ab51804afe0da095abe1887313c8200423dc458b0a80ed1cb11825547e6"} Jan 22 18:18:53 crc kubenswrapper[5032]: I0122 18:18:53.457529 5032 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nvvqz" podStartSLOduration=2.733034389 podStartE2EDuration="5.457510237s" podCreationTimestamp="2026-01-22 18:18:48 +0000 UTC" firstStartedPulling="2026-01-22 18:18:50.379007054 +0000 UTC m=+8034.191176105" lastFinishedPulling="2026-01-22 18:18:53.103482922 +0000 UTC m=+8036.915651953" observedRunningTime="2026-01-22 18:18:53.455025011 +0000 UTC m=+8037.267194082" watchObservedRunningTime="2026-01-22 18:18:53.457510237 +0000 UTC m=+8037.269679268" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.092274 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.092833 5032 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.156219 5032 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.455544 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:18:59 crc kubenswrapper[5032]: E0122 18:18:59.455840 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.526248 5032 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:18:59 crc kubenswrapper[5032]: I0122 18:18:59.586071 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:19:01 crc kubenswrapper[5032]: I0122 18:19:01.494327 5032 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nvvqz" podUID="fd3bf405-f7ca-41b2-b728-39b59e03f344" containerName="registry-server" containerID="cri-o://b5963ab51804afe0da095abe1887313c8200423dc458b0a80ed1cb11825547e6" gracePeriod=2 Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.505127 5032 generic.go:334] "Generic (PLEG): container finished" podID="fd3bf405-f7ca-41b2-b728-39b59e03f344" containerID="b5963ab51804afe0da095abe1887313c8200423dc458b0a80ed1cb11825547e6" exitCode=0 Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.505200 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerDied","Data":"b5963ab51804afe0da095abe1887313c8200423dc458b0a80ed1cb11825547e6"} Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.874639 5032 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.980670 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities\") pod \"fd3bf405-f7ca-41b2-b728-39b59e03f344\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.980783 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content\") pod \"fd3bf405-f7ca-41b2-b728-39b59e03f344\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.980963 5032 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg9r7\" (UniqueName: \"kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7\") pod \"fd3bf405-f7ca-41b2-b728-39b59e03f344\" (UID: \"fd3bf405-f7ca-41b2-b728-39b59e03f344\") " Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.982762 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities" (OuterVolumeSpecName: "utilities") pod "fd3bf405-f7ca-41b2-b728-39b59e03f344" (UID: "fd3bf405-f7ca-41b2-b728-39b59e03f344"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:19:02 crc kubenswrapper[5032]: I0122 18:19:02.989349 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7" (OuterVolumeSpecName: "kube-api-access-qg9r7") pod "fd3bf405-f7ca-41b2-b728-39b59e03f344" (UID: "fd3bf405-f7ca-41b2-b728-39b59e03f344"). InnerVolumeSpecName "kube-api-access-qg9r7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.029839 5032 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd3bf405-f7ca-41b2-b728-39b59e03f344" (UID: "fd3bf405-f7ca-41b2-b728-39b59e03f344"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.083539 5032 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg9r7\" (UniqueName: \"kubernetes.io/projected/fd3bf405-f7ca-41b2-b728-39b59e03f344-kube-api-access-qg9r7\") on node \"crc\" DevicePath \"\"" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.083577 5032 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.083590 5032 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3bf405-f7ca-41b2-b728-39b59e03f344-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.517101 5032 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvvqz" event={"ID":"fd3bf405-f7ca-41b2-b728-39b59e03f344","Type":"ContainerDied","Data":"2285d485dbcd0706268fe008403801eb0fdcf23dc10d712db3cc5edf91a4bed5"} Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.517181 5032 scope.go:117] "RemoveContainer" containerID="b5963ab51804afe0da095abe1887313c8200423dc458b0a80ed1cb11825547e6" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.518330 5032 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nvvqz" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.546032 5032 scope.go:117] "RemoveContainer" containerID="ee2c2ea53aed5a1edd2ca6b5ba176ef64a81c8fdb020cf48f85fe6fb9345bd29" Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.565520 5032 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.575062 5032 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nvvqz"] Jan 22 18:19:03 crc kubenswrapper[5032]: I0122 18:19:03.575372 5032 scope.go:117] "RemoveContainer" containerID="0e0077565a69e87487935953d0832b0ad4b66ea123cdbb4508e2111aa36a45b3" Jan 22 18:19:04 crc kubenswrapper[5032]: I0122 18:19:04.467359 5032 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd3bf405-f7ca-41b2-b728-39b59e03f344" path="/var/lib/kubelet/pods/fd3bf405-f7ca-41b2-b728-39b59e03f344/volumes" Jan 22 18:19:12 crc kubenswrapper[5032]: I0122 18:19:12.455375 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:19:12 crc kubenswrapper[5032]: E0122 18:19:12.456414 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:19:27 crc kubenswrapper[5032]: I0122 18:19:27.455203 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:19:27 crc kubenswrapper[5032]: E0122 18:19:27.456792 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:19:38 crc kubenswrapper[5032]: I0122 18:19:38.454784 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:19:38 crc kubenswrapper[5032]: E0122 18:19:38.455569 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:19:50 crc kubenswrapper[5032]: I0122 18:19:50.454963 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:19:50 crc kubenswrapper[5032]: E0122 18:19:50.455797 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" Jan 22 18:20:02 crc kubenswrapper[5032]: I0122 18:20:02.454605 5032 scope.go:117] "RemoveContainer" containerID="5eb68dcb6d79af4f9b7fd79689dcab19f05f307bcbbc15d437bafa08cb109cef" Jan 22 18:20:02 crc kubenswrapper[5032]: E0122 18:20:02.455398 5032 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6qsfs_openshift-machine-config-operator(fee2227c-a587-4ec3-909d-bd355aa116d4)\"" pod="openshift-machine-config-operator/machine-config-daemon-6qsfs" podUID="fee2227c-a587-4ec3-909d-bd355aa116d4" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515134465144024454 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015134465145017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015134444730016512 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015134444730015462 5ustar corecore